From 2b7052861857f11c3ff73773f0bd05d139115401 Mon Sep 17 00:00:00 2001 From: havelight-ee Date: Mon, 6 Feb 2023 15:01:53 +0900 Subject: [PATCH] update --- README.md | 3 + ansible.cfg | 5 + cmoa_install.yaml | 13 + inventory | 22 + roles/cmoa_demo_install/defaults/main.yml | 64 + .../files/00-default/sa_patch.sh | 8 + .../files/00-default/secret_dockerhub.yaml | 7 + .../files/00-default/secret_nexus.yaml | 8 + .../files/01-storage/00-storageclass.yaml | 6 + .../files/01-storage/01-persistentvolume.yaml | 92 + .../files/01-storage/cmoa_minio | 63 + .../files/01-storage/minio/.helmignore | 23 + .../files/01-storage/minio/Chart.yaml | 18 + .../files/01-storage/minio/README.md | 235 ++ .../01-storage/minio/templates/NOTES.txt | 43 + .../minio/templates/_helper_create_bucket.txt | 109 + .../minio/templates/_helper_create_policy.txt | 75 + .../minio/templates/_helper_create_user.txt | 88 + .../templates/_helper_custom_command.txt | 58 + .../minio/templates/_helper_policy.tpl | 18 + .../01-storage/minio/templates/_helpers.tpl | 218 ++ .../01-storage/minio/templates/configmap.yaml | 24 + .../minio/templates/console-ingress.yaml | 58 + .../minio/templates/console-service.yaml | 48 + .../minio/templates/deployment.yaml | 174 + .../minio/templates/gateway-deployment.yaml | 173 + .../01-storage/minio/templates/ingress.yaml | 58 + .../minio/templates/networkpolicy.yaml | 27 + .../minio/templates/poddisruptionbudget.yaml | 14 + .../post-install-create-bucket-job.yaml | 87 + .../post-install-create-policy-job.yaml | 87 + .../post-install-create-user-job.yaml | 97 + .../post-install-custom-command.yaml | 87 + .../files/01-storage/minio/templates/pvc.yaml | 35 + .../01-storage/minio/templates/secrets.yaml | 22 + .../templates/securitycontextconstraints.yaml | 45 + .../01-storage/minio/templates/service.yaml | 49 + .../minio/templates/serviceaccount.yaml | 7 + .../minio/templates/servicemonitor.yaml | 51 + .../minio/templates/statefulset.yaml | 217 ++ 
.../files/01-storage/minio/values.yaml | 461 +++ .../files/02-base/00-kafka-broker-config.yaml | 161 + .../files/02-base/01-coredns.yaml | 35 + .../files/02-base/base/.helmignore | 22 + .../files/02-base/base/Chart.yaml | 5 + .../02-base/base/charts/analysis/.helmignore | 22 + .../02-base/base/charts/analysis/Chart.yaml | 5 + .../imxc-metric-analyzer-master.yaml | 87 + .../imxc-metric-analyzer-worker.yaml | 38 + .../02-base/base/charts/analysis/values.yaml | 68 + .../02-base/base/charts/cortex/.helmignore | 29 + .../02-base/base/charts/cortex/Chart.lock | 24 + .../02-base/base/charts/cortex/Chart.yaml | 56 + .../02-base/base/charts/cortex/README.md | 754 ++++ .../base/charts/cortex/templates/NOTES.txt | 9 + .../base/charts/cortex/templates/_helpers.tpl | 155 + .../alertmanager/alertmanager-dep.yaml | 30 + .../alertmanager/alertmanager-svc.yaml | 10 + .../charts/cortex/templates/clusterrole.yaml | 12 + .../cortex/templates/clusterrolebinding.yaml | 16 + .../compactor/_helpers-compactor.tpl | 23 + .../compactor-poddisruptionbudget.yaml | 14 + .../compactor/compactor-servicemonitor.yaml | 42 + .../compactor/compactor-statefulset.yaml | 141 + .../templates/compactor/compactor-svc.yaml | 25 + .../charts/cortex/templates/configmap.yaml | 12 + .../templates/configs/_helpers-configs.tpl | 23 + .../cortex/templates/configs/configs-dep.yaml | 124 + .../configs/configs-poddisruptionbudget.yaml | 14 + .../configs/configs-servicemonitor.yaml | 42 + .../cortex/templates/configs/configs-svc.yaml | 23 + .../charts/cortex/templates/cortex-pv.yaml | 68 + .../distributor/_helpers-distributor.tpl | 23 + .../distributor/distributor-dep.yaml | 121 + .../distributor/distributor-hpa.yaml | 39 + .../distributor-poddisruptionbudget.yaml | 14 + .../distributor-servicemonitor.yaml | 42 + .../distributor/distributor-svc-headless.yaml | 23 + .../distributor/distributor-svc.yaml | 21 + .../templates/ingester/_helpers-ingester.tpl | 23 + .../templates/ingester/ingester-dep.yaml | 130 + 
.../templates/ingester/ingester-hpa.yaml | 29 + .../ingester-poddisruptionbudget.yaml | 14 + .../ingester/ingester-servicemonitor.yaml | 42 + .../ingester/ingester-statefulset.yaml | 153 + .../ingester/ingester-svc-headless.yaml | 22 + .../templates/ingester/ingester-svc.yaml | 21 + .../cortex/templates/nginx/_helpers-nginx.tpl | 23 + .../cortex/templates/nginx/nginx-config.yaml | 140 + .../cortex/templates/nginx/nginx-dep.yaml | 111 + .../cortex/templates/nginx/nginx-hpa.yaml | 39 + .../cortex/templates/nginx/nginx-ingress.yaml | 40 + .../nginx/nginx-poddisruptionbudget.yaml | 14 + .../cortex/templates/nginx/nginx-svc.yaml | 23 + .../cortex/templates/node-exporter.yaml | 96 + .../templates/querier/_helpers-querier.tpl | 23 + .../cortex/templates/querier/querier-dep.yaml | 115 + .../cortex/templates/querier/querier-hpa.yaml | 39 + .../querier/querier-poddisruptionbudget.yaml | 14 + .../querier/querier-servicemonitor.yaml | 42 + .../cortex/templates/querier/querier-svc.yaml | 21 + .../_helpers-query-frontend.tpl | 23 + .../query-frontend/query-frontend-dep.yaml | 107 + .../query-frontend-servicemonitor.yaml | 42 + .../query-frontend-svc-headless.yaml | 23 + .../query-frontend/query-frontend-svc.yaml | 21 + .../query-poddisruptionbudget.yaml | 14 + .../cortex/templates/ruler/_helpers-ruler.tpl | 30 + .../templates/ruler/ruler-configmap.yaml | 14 + .../cortex/templates/ruler/ruler-dep.yaml | 191 + .../ruler/ruler-poddisruptionbudget.yaml | 14 + .../templates/ruler/ruler-servicemonitor.yaml | 42 + .../cortex/templates/ruler/ruler-svc.yaml | 23 + .../cortex/templates/runtime-configmap.yaml | 18 + .../cortex/templates/secret-postgresql.yaml | 11 + .../base/charts/cortex/templates/secret.yaml | 11 + .../cortex/templates/serviceaccount.yaml | 12 + .../store-gateway/_helpers-store-gateway.tpl | 23 + .../store-gateway-poddisruptionbudget.yaml | 14 + .../store-gateway-servicemonitor.yaml | 42 + .../store-gateway-statefulset.yaml | 142 + .../store-gateway-svc-headless.yaml | 
24 + .../store-gateway/store-gateway-svc.yaml | 23 + .../templates/svc-memberlist-headless.yaml | 18 + .../table-manager/_helpers-table-manager.tpl | 23 + .../table-manager/table-manager-dep.yaml | 106 + .../table-manager-poddisruptionbudget.yaml | 14 + .../table-manager-servicemonitor.yaml | 42 + .../table-manager/table-manager-svc.yaml | 23 + .../02-base/base/charts/cortex/values.yaml | 1605 +++++++++ .../base/charts/elasticsearch/.helmignore | 2 + .../base/charts/elasticsearch/Chart.yaml | 12 + .../templates/1.headless_service.yaml | 14 + .../elasticsearch/templates/2.service.yaml | 17 + .../elasticsearch/templates/3.configmap.yaml | 41 + .../charts/elasticsearch/templates/4.pv.yaml | 74 + .../charts/elasticsearch/templates/5.pvc.yaml | 53 + .../templates/6.statefulset.yaml | 146 + .../elasticsearch/templates/7.secrets.yaml | 10 + .../templates/needtocheck_storageclass.yaml | 8 + .../base/charts/elasticsearch/values.yaml | 68 + .../base/charts/kafka-manager/.helmignore | 22 + .../base/charts/kafka-manager/Chart.yaml | 5 + .../templates/0.kafka-manager-service.yaml | 14 + .../templates/1.kafka-manager.yaml | 33 + .../base/charts/kafka-manager/values.yaml | 68 + .../02-base/base/charts/kafka/.helmignore | 22 + .../base/charts/kafka/1.broker-config.yaml | 161 + .../02-base/base/charts/kafka/Chart.yaml | 5 + .../base/charts/kafka/templates/2.dns.yaml | 14 + .../kafka/templates/3.bootstrap-service.yaml | 11 + .../kafka/templates/4.persistent-volume.yaml | 76 + .../base/charts/kafka/templates/5.kafka.yaml | 132 + .../charts/kafka/templates/6.outside.yaml | 89 + .../02-base/base/charts/kafka/values.yaml | 68 + .../02-base/base/charts/postgres/.helmignore | 22 + .../02-base/base/charts/postgres/Chart.yaml | 5 + .../templates/1.postgres-configmap.yaml | 11 + .../templates/2.postgres-storage.yaml | 38 + .../templates/3.postgres-service.yaml | 14 + .../templates/4.postgres-deployment.yaml | 45 + .../02-base/base/charts/postgres/values.yaml | 68 + 
.../02-base/base/charts/rabbitmq/.helmignore | 21 + .../02-base/base/charts/rabbitmq/Chart.lock | 6 + .../02-base/base/charts/rabbitmq/Chart.yaml | 26 + .../02-base/base/charts/rabbitmq/README.md | 566 +++ .../charts/rabbitmq/charts/common/.helmignore | 22 + .../charts/rabbitmq/charts/common/Chart.yaml | 23 + .../charts/rabbitmq/charts/common/README.md | 327 ++ .../charts/common/templates/_affinities.tpl | 102 + .../charts/common/templates/_capabilities.tpl | 117 + .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 55 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 129 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../charts/rabbitmq/charts/common/values.yaml | 5 + .../charts/rabbitmq/ci/default-values.yaml | 1 + .../rabbitmq/ci/tolerations-values.yaml | 4 + .../base/charts/rabbitmq/templates/NOTES.txt | 167 + .../charts/rabbitmq/templates/_helpers.tpl | 247 ++ .../rabbitmq/templates/configuration.yaml | 16 + .../charts/rabbitmq/templates/extra-list.yaml | 4 + .../charts/rabbitmq/templates/ingress.yaml | 57 + .../rabbitmq/templates/networkpolicy.yaml | 37 + .../base/charts/rabbitmq/templates/pdb.yaml | 20 + .../rabbitmq/templates/prometheusrule.yaml | 24 + .../base/charts/rabbitmq/templates/pv.yaml | 22 + .../base/charts/rabbitmq/templates/pvc.yaml | 15 + .../base/charts/rabbitmq/templates/role.yaml | 18 + .../rabbitmq/templates/rolebinding.yaml 
| 18 + .../charts/rabbitmq/templates/secrets.yaml | 43 + .../rabbitmq/templates/serviceaccount.yaml | 14 + .../rabbitmq/templates/servicemonitor.yaml | 49 + .../rabbitmq/templates/statefulset.yaml | 382 ++ .../rabbitmq/templates/svc-headless.yaml | 40 + .../base/charts/rabbitmq/templates/svc.yaml | 95 + .../rabbitmq/templates/tls-secrets.yaml | 74 + .../base/charts/rabbitmq/values.schema.json | 100 + .../02-base/base/charts/rabbitmq/values.yaml | 1151 ++++++ .../02-base/base/charts/redis/.helmignore | 21 + .../02-base/base/charts/redis/Chart.lock | 6 + .../02-base/base/charts/redis/Chart.yaml | 29 + .../files/02-base/base/charts/redis/README.md | 707 ++++ .../charts/redis/charts/common/.helmignore | 22 + .../charts/redis/charts/common/Chart.yaml | 23 + .../base/charts/redis/charts/common/README.md | 316 ++ .../charts/common/templates/_affinities.tpl | 94 + .../charts/common/templates/_capabilities.tpl | 61 + .../redis/charts/common/templates/_images.tpl | 43 + .../charts/common/templates/_ingress.tpl | 42 + .../redis/charts/common/templates/_labels.tpl | 18 + .../redis/charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 127 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../redis/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 72 + .../templates/validations/_validations.tpl | 46 + .../charts/redis/charts/common/values.yaml | 3 + .../base/charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/production-sentinel-values.yaml | 682 ++++ .../base/charts/redis/templates/NOTES.txt | 136 + .../base/charts/redis/templates/_helpers.tpl | 421 +++ 
.../redis/templates/configmap-scripts.yaml | 393 +++ .../charts/redis/templates/configmap.yaml | 53 + .../charts/redis/templates/headless-svc.yaml | 28 + .../redis/templates/health-configmap.yaml | 176 + .../redis/templates/metrics-prometheus.yaml | 39 + .../charts/redis/templates/metrics-svc.yaml | 34 + .../charts/redis/templates/networkpolicy.yaml | 74 + .../base/charts/redis/templates/pdb.yaml | 22 + .../redis/templates/prometheusrule.yaml | 25 + .../base/charts/redis/templates/psp.yaml | 43 + .../templates/redis-master-statefulset.yaml | 378 ++ .../redis/templates/redis-master-svc.yaml | 43 + .../templates/redis-node-statefulset.yaml | 494 +++ .../base/charts/redis/templates/redis-pv.yaml | 92 + .../charts/redis/templates/redis-role.yaml | 22 + .../redis/templates/redis-rolebinding.yaml | 19 + .../redis/templates/redis-serviceaccount.yaml | 15 + .../templates/redis-slave-statefulset.yaml | 384 ++ .../redis/templates/redis-slave-svc.yaml | 43 + .../templates/redis-with-sentinel-svc.yaml | 43 + .../base/charts/redis/templates/secret.yaml | 15 + .../base/charts/redis/values.schema.json | 168 + .../02-base/base/charts/redis/values.yaml | 932 +++++ .../02-base/base/charts/zookeeper/.helmignore | 22 + .../02-base/base/charts/zookeeper/Chart.yaml | 5 + .../charts/zookeeper/templates/0.config.yaml | 35 + .../templates/1.service-leader-election.yaml | 16 + .../zookeeper/templates/2.service-client.yaml | 12 + .../templates/3.persistent-volume.yaml | 74 + .../zookeeper/templates/4.statefulset.yaml | 87 + .../charts/zookeeper/templates/5.pvc.yaml | 50 + .../02-base/base/charts/zookeeper/values.yaml | 68 + .../files/02-base/base/index.yaml | 3 + .../files/02-base/base/templates/role.yaml | 16 + .../files/02-base/base/values.yaml | 73 + .../03-ddl-dml/elasticsearch/es-ddl-put.sh | 3085 +++++++++++++++++ ...ete_event_info_create_dest_source_index.sh | 220 ++ ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + 
..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0-rel332/manual.txt | 31 + ...ete_event_info_create_dest_source_index.sh | 220 ++ ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../03-ddl-dml/postgres/jaeger_menumeta.psql | 21 + .../03-ddl-dml/postgres/jspd_menumeta.psql | 22 + ...ete_event_info_create_dest_source_index.sh | 220 ++ ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../patch/memu_meta/jaeger_menumeta.psql | 21 + .../patch/memu_meta/jspd_menumeta.psql | 22 + .../postgres/patch/postgres_patch_3.2.0.psql | 803 +++++ .../postgres/patch/postgres_patch_3.3.0.psql | 919 +++++ .../postgres/patch/postgres_patch_3.3.2.psql | 459 +++ .../postgres/patch/postgres_patch_3.4.1.psql | 1379 ++++++++ .../postgres/patch/postgres_patch_3.4.2.psql | 8 + .../postgres/patch/postgres_patch_3.4.3.psql | 361 ++ .../postgres/patch/postgres_patch_3.4.6.psql | 360 ++ .../postgres/patch/postgres_patch_3.4.7.psql | 102 + .../postgres/patch/postgres_patch_3.4.8.psql | 387 +++ .../patch/postgres_patch_R30020210503.psql | 2844 
+++++++++++++++ .../patch/postgres_patch_R30020210730.psql | 4 + .../postgres/postgres_insert_ddl.psql | 1667 +++++++++ .../postgres/postgres_insert_dml.psql | 2380 +++++++++++++ .../files/04-keycloak/Chart.yaml | 23 + .../files/04-keycloak/OWNERS | 6 + .../files/04-keycloak/README.md | 765 ++++ .../04-keycloak/charts/postgresql/.helmignore | 21 + .../04-keycloak/charts/postgresql/Chart.yaml | 24 + .../04-keycloak/charts/postgresql/README.md | 625 ++++ .../postgresql/charts/common/.helmignore | 22 + .../postgresql/charts/common/Chart.yaml | 19 + .../charts/postgresql/charts/common/README.md | 228 ++ .../charts/common/templates/_capabilities.tpl | 22 + .../charts/common/templates/_images.tpl | 44 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 49 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_warnings.tpl | 14 + .../postgresql/charts/common/values.yaml | 3 + .../postgresql/ci/commonAnnotations.yaml | 4 + .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + .../charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/requirements.lock | 6 + .../charts/postgresql/requirements.yaml | 4 + .../charts/postgresql/templates/NOTES.txt | 54 + .../charts/postgresql/templates/_helpers.tpl | 494 +++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../templates/metrics-configmap.yaml | 13 + .../postgresql/templates/metrics-svc.yaml | 25 + .../postgresql/templates/networkpolicy.yaml | 36 + .../templates/podsecuritypolicy.yaml | 37 + .../postgresql/templates/prometheusrule.yaml | 23 + .../charts/postgresql/templates/pv.yaml | 27 + 
.../charts/postgresql/templates/role.yaml | 19 + .../postgresql/templates/rolebinding.yaml | 19 + .../charts/postgresql/templates/secrets.yaml | 23 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 340 ++ .../postgresql/templates/statefulset.yaml | 510 +++ .../postgresql/templates/svc-headless.yaml | 18 + .../charts/postgresql/templates/svc-read.yaml | 42 + .../charts/postgresql/templates/svc.yaml | 40 + .../charts/postgresql/values-production.yaml | 591 ++++ .../charts/postgresql/values.schema.json | 103 + .../04-keycloak/charts/postgresql/values.yaml | 604 ++++ .../files/04-keycloak/ci/h2-values.yaml | 38 + .../04-keycloak/ci/postgres-ha-values.yaml | 73 + .../files/04-keycloak/requirements.lock | 6 + .../files/04-keycloak/requirements.yaml | 5 + .../files/04-keycloak/scripts/keycloak.cli | 13 + .../files/04-keycloak/templates/NOTES.txt | 61 + .../files/04-keycloak/templates/_helpers.tpl | 87 + .../templates/configmap-startup.yaml | 14 + .../files/04-keycloak/templates/hpa.yaml | 22 + .../files/04-keycloak/templates/ingress.yaml | 104 + .../04-keycloak/templates/networkpolicy.yaml | 46 + .../templates/poddisruptionbudget.yaml | 13 + .../04-keycloak/templates/prometheusrule.yaml | 24 + .../files/04-keycloak/templates/rbac.yaml | 25 + .../files/04-keycloak/templates/route.yaml | 34 + .../files/04-keycloak/templates/secrets.yaml | 29 + .../templates/service-headless.yaml | 18 + .../04-keycloak/templates/service-http.yaml | 59 + .../04-keycloak/templates/serviceaccount.yaml | 19 + .../04-keycloak/templates/servicemonitor.yaml | 39 + .../04-keycloak/templates/statefulset.yaml | 208 ++ .../templates/test/configmap-test.yaml | 50 + .../04-keycloak/templates/test/pod-test.yaml | 43 + .../files/04-keycloak/values.schema.json | 434 +++ .../files/04-keycloak/values.yaml | 552 +++ .../files/05-imxc/Chart.yaml | 5 + .../files/05-imxc/cmoa-manual.yaml | 36 + 
.../files/05-imxc/scripts/init-api-server.sh | 17 + .../files/05-imxc/scripts/init-auth-server.sh | 36 + .../files/05-imxc/scripts/init-noti-server.sh | 14 + .../files/05-imxc/scripts/init-resource.sh | 6 + .../files/05-imxc/scripts/init.json | 2148 ++++++++++++ .../files/05-imxc/templates/auth-server.yaml | 82 + .../05-imxc/templates/cloudmoa-datagate.yaml | 79 + .../templates/cloudmoa-metric-agent.yaml | 331 ++ .../templates/cloudmoa-metric-collector.yaml | 45 + .../templates/cmoa-kube-info-batch.yaml | 38 + .../templates/cmoa-kube-info-connector.yaml | 48 + .../templates/cmoa-kube-info-flat.yaml | 35 + .../files/05-imxc/templates/cmoa-manual.yaml | 36 + .../05-imxc/templates/eureka-server.yaml | 60 + .../05-imxc/templates/imxc-api-server.yaml | 245 ++ .../05-imxc/templates/imxc-collector.yaml | 79 + .../files/05-imxc/templates/noti-server.yaml | 121 + .../files/05-imxc/templates/streams-depl.yaml | 26 + .../05-imxc/templates/topology-agent.yaml | 107 + .../files/05-imxc/templates/zuul-server.yaml | 62 + .../files/05-imxc/values.yaml | 157 + .../06-imxc-ui/imxc-ui-jaeger/Chart.yaml | 5 + .../imxc-ui-jaeger/cmoa-manual.yaml | 36 + .../imxc-ui-jaeger/scripts/init-api-server.sh | 16 + .../scripts/init-auth-server.sh | 36 + .../scripts/init-noti-server.sh | 14 + .../imxc-ui-jaeger/scripts/init-resource.sh | 6 + .../imxc-ui-jaeger/scripts/init.json | 2148 ++++++++++++ .../templates/imxc-ui-config-jaeger.yaml | 75 + .../templates/imxc-ui-server-jaeger.yaml | 63 + .../06-imxc-ui/imxc-ui-jaeger/values.yaml | 94 + .../files/06-imxc-ui/imxc-ui-jspd/Chart.yaml | 5 + .../imxc-ui-jspd/scripts/init-api-server.sh | 16 + .../imxc-ui-jspd/scripts/init-auth-server.sh | 36 + .../imxc-ui-jspd/scripts/init-noti-server.sh | 14 + .../imxc-ui-jspd/scripts/init-resource.sh | 6 + .../06-imxc-ui/imxc-ui-jspd/scripts/init.json | 2148 ++++++++++++ .../templates/imxc-ui-config.yaml | 44 + .../templates/imxc-ui-server.yaml | 63 + .../files/06-imxc-ui/imxc-ui-jspd/values.yaml | 94 + 
roles/cmoa_demo_install/files/ip_change | 15 + roles/cmoa_demo_install/files/k8s_status | 86 + .../files/postgres_check_data | 6 + roles/cmoa_demo_install/files/rel_change | 15 + .../tasks/00-default-settings-master.yml | 30 + .../tasks/00-default-settings-node.yml | 27 + .../tasks/01-storage-install.yml | 45 + .../tasks/02-base-install.yml | 51 + roles/cmoa_demo_install/tasks/03-ddl-dml.yml | 64 + .../tasks/04-keycloak-install.yml | 34 + .../tasks/05-imxc-install.yml | 16 + .../tasks/06-imxc-ui-install.yml | 112 + .../tasks/07-keycloak-setting.yml | 76 + roles/cmoa_demo_install/tasks/08-finish.yml | 92 + .../cmoa_demo_install/tasks/helm-install.yml | 60 + roles/cmoa_demo_install/tasks/main.yml | 43 + .../cmoa_demo_install/templates/realm.json.j2 | 7 + roles/cmoa_demo_install/vars/main.yml | 7 + roles/cmoa_install/defaults/main.yml | 65 + .../cmoa_install/files/00-default/sa_patch.sh | 8 + .../files/00-default/secret_dockerhub.yaml | 7 + .../files/00-default/secret_nexus.yaml | 8 + .../files/01-storage/00-storageclass.yaml | 6 + .../files/01-storage/01-persistentvolume.yaml | 92 + .../cmoa_install/files/01-storage/cmoa_minio | 63 + .../files/01-storage/minio/.helmignore | 23 + .../files/01-storage/minio/Chart.yaml | 18 + .../files/01-storage/minio/README.md | 235 ++ .../01-storage/minio/templates/NOTES.txt | 43 + .../minio/templates/_helper_create_bucket.txt | 109 + .../minio/templates/_helper_create_policy.txt | 75 + .../minio/templates/_helper_create_user.txt | 88 + .../templates/_helper_custom_command.txt | 58 + .../minio/templates/_helper_policy.tpl | 18 + .../01-storage/minio/templates/_helpers.tpl | 218 ++ .../01-storage/minio/templates/configmap.yaml | 24 + .../minio/templates/console-ingress.yaml | 58 + .../minio/templates/console-service.yaml | 48 + .../minio/templates/deployment.yaml | 174 + .../minio/templates/gateway-deployment.yaml | 173 + .../01-storage/minio/templates/ingress.yaml | 58 + .../minio/templates/networkpolicy.yaml | 27 + 
.../minio/templates/poddisruptionbudget.yaml | 14 + .../post-install-create-bucket-job.yaml | 87 + .../post-install-create-policy-job.yaml | 87 + .../post-install-create-user-job.yaml | 97 + .../post-install-custom-command.yaml | 87 + .../files/01-storage/minio/templates/pvc.yaml | 35 + .../01-storage/minio/templates/secrets.yaml | 22 + .../templates/securitycontextconstraints.yaml | 45 + .../01-storage/minio/templates/service.yaml | 49 + .../minio/templates/serviceaccount.yaml | 7 + .../minio/templates/servicemonitor.yaml | 51 + .../minio/templates/statefulset.yaml | 217 ++ .../files/01-storage/minio/values.yaml | 461 +++ .../files/02-base/00-kafka-broker-config.yaml | 161 + .../files/02-base/01-coredns.yaml | 35 + .../files/02-base/base/.helmignore | 22 + .../files/02-base/base/Chart.yaml | 5 + .../02-base/base/charts/analysis/.helmignore | 22 + .../02-base/base/charts/analysis/Chart.yaml | 5 + .../imxc-metric-analyzer-master.yaml | 87 + .../imxc-metric-analyzer-worker.yaml | 38 + .../02-base/base/charts/analysis/values.yaml | 68 + .../02-base/base/charts/cortex/.helmignore | 29 + .../02-base/base/charts/cortex/Chart.lock | 24 + .../02-base/base/charts/cortex/Chart.yaml | 56 + .../02-base/base/charts/cortex/README.md | 754 ++++ .../base/charts/cortex/templates/NOTES.txt | 9 + .../base/charts/cortex/templates/_helpers.tpl | 155 + .../alertmanager/alertmanager-dep.yaml | 30 + .../alertmanager/alertmanager-svc.yaml | 10 + .../charts/cortex/templates/clusterrole.yaml | 12 + .../cortex/templates/clusterrolebinding.yaml | 16 + .../compactor/_helpers-compactor.tpl | 23 + .../compactor-poddisruptionbudget.yaml | 14 + .../compactor/compactor-servicemonitor.yaml | 42 + .../compactor/compactor-statefulset.yaml | 141 + .../templates/compactor/compactor-svc.yaml | 25 + .../charts/cortex/templates/configmap.yaml | 12 + .../templates/configs/_helpers-configs.tpl | 23 + .../cortex/templates/configs/configs-dep.yaml | 124 + .../configs/configs-poddisruptionbudget.yaml | 14 + 
.../configs/configs-servicemonitor.yaml | 42 + .../cortex/templates/configs/configs-svc.yaml | 23 + .../charts/cortex/templates/cortex-pv.yaml | 68 + .../distributor/_helpers-distributor.tpl | 23 + .../distributor/distributor-dep.yaml | 121 + .../distributor/distributor-hpa.yaml | 39 + .../distributor-poddisruptionbudget.yaml | 14 + .../distributor-servicemonitor.yaml | 42 + .../distributor/distributor-svc-headless.yaml | 23 + .../distributor/distributor-svc.yaml | 21 + .../templates/ingester/_helpers-ingester.tpl | 23 + .../templates/ingester/ingester-dep.yaml | 130 + .../templates/ingester/ingester-hpa.yaml | 29 + .../ingester-poddisruptionbudget.yaml | 14 + .../ingester/ingester-servicemonitor.yaml | 42 + .../ingester/ingester-statefulset.yaml | 153 + .../ingester/ingester-svc-headless.yaml | 22 + .../templates/ingester/ingester-svc.yaml | 21 + .../cortex/templates/nginx/_helpers-nginx.tpl | 23 + .../cortex/templates/nginx/nginx-config.yaml | 140 + .../cortex/templates/nginx/nginx-dep.yaml | 111 + .../cortex/templates/nginx/nginx-hpa.yaml | 39 + .../cortex/templates/nginx/nginx-ingress.yaml | 40 + .../nginx/nginx-poddisruptionbudget.yaml | 14 + .../cortex/templates/nginx/nginx-svc.yaml | 23 + .../cortex/templates/node-exporter.yaml | 96 + .../templates/querier/_helpers-querier.tpl | 23 + .../cortex/templates/querier/querier-dep.yaml | 115 + .../cortex/templates/querier/querier-hpa.yaml | 39 + .../querier/querier-poddisruptionbudget.yaml | 14 + .../querier/querier-servicemonitor.yaml | 42 + .../cortex/templates/querier/querier-svc.yaml | 21 + .../_helpers-query-frontend.tpl | 23 + .../query-frontend/query-frontend-dep.yaml | 107 + .../query-frontend-servicemonitor.yaml | 42 + .../query-frontend-svc-headless.yaml | 23 + .../query-frontend/query-frontend-svc.yaml | 21 + .../query-poddisruptionbudget.yaml | 14 + .../cortex/templates/ruler/_helpers-ruler.tpl | 30 + .../templates/ruler/ruler-configmap.yaml | 14 + .../cortex/templates/ruler/ruler-dep.yaml | 191 + 
.../ruler/ruler-poddisruptionbudget.yaml | 14 + .../templates/ruler/ruler-servicemonitor.yaml | 42 + .../cortex/templates/ruler/ruler-svc.yaml | 23 + .../cortex/templates/runtime-configmap.yaml | 18 + .../cortex/templates/secret-postgresql.yaml | 11 + .../base/charts/cortex/templates/secret.yaml | 11 + .../cortex/templates/serviceaccount.yaml | 12 + .../store-gateway/_helpers-store-gateway.tpl | 23 + .../store-gateway-poddisruptionbudget.yaml | 14 + .../store-gateway-servicemonitor.yaml | 42 + .../store-gateway-statefulset.yaml | 142 + .../store-gateway-svc-headless.yaml | 24 + .../store-gateway/store-gateway-svc.yaml | 23 + .../templates/svc-memberlist-headless.yaml | 18 + .../table-manager/_helpers-table-manager.tpl | 23 + .../table-manager/table-manager-dep.yaml | 106 + .../table-manager-poddisruptionbudget.yaml | 14 + .../table-manager-servicemonitor.yaml | 42 + .../table-manager/table-manager-svc.yaml | 23 + .../02-base/base/charts/cortex/values.yaml | 1605 +++++++++ .../base/charts/elasticsearch/.helmignore | 2 + .../base/charts/elasticsearch/Chart.yaml | 12 + .../templates/1.headless_service.yaml | 14 + .../elasticsearch/templates/2.service.yaml | 17 + .../elasticsearch/templates/3.configmap.yaml | 41 + .../charts/elasticsearch/templates/4.pv.yaml | 74 + .../charts/elasticsearch/templates/5.pvc.yaml | 53 + .../templates/6.statefulset.yaml | 146 + .../elasticsearch/templates/7.secrets.yaml | 10 + .../templates/needtocheck_storageclass.yaml | 8 + .../base/charts/elasticsearch/values.yaml | 68 + .../base/charts/kafka-manager/.helmignore | 22 + .../base/charts/kafka-manager/Chart.yaml | 5 + .../templates/0.kafka-manager-service.yaml | 14 + .../templates/1.kafka-manager.yaml | 33 + .../base/charts/kafka-manager/values.yaml | 68 + .../02-base/base/charts/kafka/.helmignore | 22 + .../base/charts/kafka/1.broker-config.yaml | 161 + .../02-base/base/charts/kafka/Chart.yaml | 5 + .../base/charts/kafka/templates/2.dns.yaml | 14 + 
.../kafka/templates/3.bootstrap-service.yaml | 11 + .../kafka/templates/4.persistent-volume.yaml | 76 + .../base/charts/kafka/templates/5.kafka.yaml | 132 + .../charts/kafka/templates/6.outside.yaml | 89 + .../02-base/base/charts/kafka/values.yaml | 68 + .../02-base/base/charts/postgres/.helmignore | 22 + .../02-base/base/charts/postgres/Chart.yaml | 5 + .../templates/1.postgres-configmap.yaml | 11 + .../templates/2.postgres-storage.yaml | 38 + .../templates/3.postgres-service.yaml | 14 + .../templates/4.postgres-deployment.yaml | 45 + .../02-base/base/charts/postgres/values.yaml | 68 + .../02-base/base/charts/rabbitmq/.helmignore | 21 + .../02-base/base/charts/rabbitmq/Chart.lock | 6 + .../02-base/base/charts/rabbitmq/Chart.yaml | 26 + .../02-base/base/charts/rabbitmq/README.md | 566 +++ .../charts/rabbitmq/charts/common/.helmignore | 22 + .../charts/rabbitmq/charts/common/Chart.yaml | 23 + .../charts/rabbitmq/charts/common/README.md | 327 ++ .../charts/common/templates/_affinities.tpl | 102 + .../charts/common/templates/_capabilities.tpl | 117 + .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 55 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 129 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../charts/rabbitmq/charts/common/values.yaml | 5 + .../charts/rabbitmq/ci/default-values.yaml | 1 + .../rabbitmq/ci/tolerations-values.yaml | 4 + 
.../base/charts/rabbitmq/templates/NOTES.txt | 167 + .../charts/rabbitmq/templates/_helpers.tpl | 247 ++ .../rabbitmq/templates/configuration.yaml | 16 + .../charts/rabbitmq/templates/extra-list.yaml | 4 + .../charts/rabbitmq/templates/ingress.yaml | 57 + .../rabbitmq/templates/networkpolicy.yaml | 37 + .../base/charts/rabbitmq/templates/pdb.yaml | 20 + .../rabbitmq/templates/prometheusrule.yaml | 24 + .../base/charts/rabbitmq/templates/pv.yaml | 22 + .../base/charts/rabbitmq/templates/pvc.yaml | 15 + .../base/charts/rabbitmq/templates/role.yaml | 18 + .../rabbitmq/templates/rolebinding.yaml | 18 + .../charts/rabbitmq/templates/secrets.yaml | 43 + .../rabbitmq/templates/serviceaccount.yaml | 14 + .../rabbitmq/templates/servicemonitor.yaml | 49 + .../rabbitmq/templates/statefulset.yaml | 382 ++ .../rabbitmq/templates/svc-headless.yaml | 40 + .../base/charts/rabbitmq/templates/svc.yaml | 95 + .../rabbitmq/templates/tls-secrets.yaml | 74 + .../base/charts/rabbitmq/values.schema.json | 100 + .../02-base/base/charts/rabbitmq/values.yaml | 1151 ++++++ .../02-base/base/charts/redis/.helmignore | 21 + .../02-base/base/charts/redis/Chart.lock | 6 + .../02-base/base/charts/redis/Chart.yaml | 29 + .../files/02-base/base/charts/redis/README.md | 707 ++++ .../charts/redis/charts/common/.helmignore | 22 + .../charts/redis/charts/common/Chart.yaml | 23 + .../base/charts/redis/charts/common/README.md | 316 ++ .../charts/common/templates/_affinities.tpl | 94 + .../charts/common/templates/_capabilities.tpl | 61 + .../redis/charts/common/templates/_images.tpl | 43 + .../charts/common/templates/_ingress.tpl | 42 + .../redis/charts/common/templates/_labels.tpl | 18 + .../redis/charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 127 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../redis/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + 
.../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 131 + .../common/templates/validations/_redis.tpl | 72 + .../templates/validations/_validations.tpl | 46 + .../charts/redis/charts/common/values.yaml | 3 + .../base/charts/redis/ci/default-values.yaml | 1 + .../charts/redis/ci/extra-flags-values.yaml | 11 + .../redis/ci/production-sentinel-values.yaml | 682 ++++ .../base/charts/redis/templates/NOTES.txt | 136 + .../base/charts/redis/templates/_helpers.tpl | 421 +++ .../redis/templates/configmap-scripts.yaml | 393 +++ .../charts/redis/templates/configmap.yaml | 53 + .../charts/redis/templates/headless-svc.yaml | 28 + .../redis/templates/health-configmap.yaml | 176 + .../redis/templates/metrics-prometheus.yaml | 39 + .../charts/redis/templates/metrics-svc.yaml | 34 + .../charts/redis/templates/networkpolicy.yaml | 74 + .../base/charts/redis/templates/pdb.yaml | 22 + .../redis/templates/prometheusrule.yaml | 25 + .../base/charts/redis/templates/psp.yaml | 43 + .../templates/redis-master-statefulset.yaml | 378 ++ .../redis/templates/redis-master-svc.yaml | 43 + .../templates/redis-node-statefulset.yaml | 494 +++ .../base/charts/redis/templates/redis-pv.yaml | 92 + .../charts/redis/templates/redis-role.yaml | 22 + .../redis/templates/redis-rolebinding.yaml | 19 + .../redis/templates/redis-serviceaccount.yaml | 15 + .../templates/redis-slave-statefulset.yaml | 384 ++ .../redis/templates/redis-slave-svc.yaml | 43 + .../templates/redis-with-sentinel-svc.yaml | 43 + .../base/charts/redis/templates/secret.yaml | 15 + .../base/charts/redis/values.schema.json | 168 + .../02-base/base/charts/redis/values.yaml | 932 +++++ .../02-base/base/charts/zookeeper/.helmignore | 22 + .../02-base/base/charts/zookeeper/Chart.yaml | 5 + .../charts/zookeeper/templates/0.config.yaml | 35 + .../templates/1.service-leader-election.yaml | 16 + 
.../zookeeper/templates/2.service-client.yaml | 12 + .../templates/3.persistent-volume.yaml | 74 + .../zookeeper/templates/4.statefulset.yaml | 87 + .../charts/zookeeper/templates/5.pvc.yaml | 50 + .../02-base/base/charts/zookeeper/values.yaml | 68 + .../files/02-base/base/index.yaml | 3 + .../files/02-base/base/templates/role.yaml | 16 + .../files/02-base/base/values.yaml | 73 + .../03-ddl-dml/elasticsearch/es-ddl-put.sh | 3085 +++++++++++++++++ ...ete_event_info_create_dest_source_index.sh | 220 ++ ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0-rel332/manual.txt | 31 + ...ete_event_info_create_dest_source_index.sh | 220 ++ ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + .../03-ddl-dml/postgres/jaeger_menumeta.psql | 21 + .../03-ddl-dml/postgres/jspd_menumeta.psql | 22 + ...ete_event_info_create_dest_source_index.sh | 220 ++ ..._event_info_reindex_to_dest_from_source.sh | 28 + ..._event_info_reindex_to_source_from_dest.sh | 30 + ..._kubernete_event_info_delete_dest_index.sh | 21 + ...icense_history_create_dest_source_index.sh | 184 + ...nse_history_reindex_to_dest_from_source.sh | 32 + ...nse_history_reindex_to_source_from_dest.sh | 30 + .../8_license_history_delete_dest_index.sh | 21 + .../patch/es-reindex-3.2.0/manual.txt | 31 + 
.../patch/memu_meta/jaeger_menumeta.psql | 21 + .../patch/memu_meta/jspd_menumeta.psql | 22 + .../postgres/patch/postgres_patch_3.2.0.psql | 803 +++++ .../postgres/patch/postgres_patch_3.3.0.psql | 919 +++++ .../postgres/patch/postgres_patch_3.3.2.psql | 459 +++ .../postgres/patch/postgres_patch_3.4.1.psql | 1379 ++++++++ .../postgres/patch/postgres_patch_3.4.2.psql | 8 + .../postgres/patch/postgres_patch_3.4.3.psql | 361 ++ .../postgres/patch/postgres_patch_3.4.6.psql | 360 ++ .../postgres/patch/postgres_patch_3.4.7.psql | 102 + .../postgres/patch/postgres_patch_3.4.8.psql | 387 +++ .../patch/postgres_patch_R30020210503.psql | 2844 +++++++++++++++ .../patch/postgres_patch_R30020210730.psql | 4 + .../postgres/postgres_insert_ddl.psql | 1667 +++++++++ .../postgres/postgres_insert_dml.psql | 2380 +++++++++++++ .../cmoa_install/files/04-keycloak/Chart.yaml | 23 + roles/cmoa_install/files/04-keycloak/OWNERS | 6 + .../cmoa_install/files/04-keycloak/README.md | 765 ++++ .../04-keycloak/charts/postgresql/.helmignore | 21 + .../04-keycloak/charts/postgresql/Chart.yaml | 24 + .../04-keycloak/charts/postgresql/README.md | 625 ++++ .../postgresql/charts/common/.helmignore | 22 + .../postgresql/charts/common/Chart.yaml | 19 + .../charts/postgresql/charts/common/README.md | 228 ++ .../charts/common/templates/_capabilities.tpl | 22 + .../charts/common/templates/_images.tpl | 44 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 32 + .../charts/common/templates/_secrets.tpl | 49 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_warnings.tpl | 14 + .../postgresql/charts/common/values.yaml | 3 + .../postgresql/ci/commonAnnotations.yaml | 4 + .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + .../charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + 
.../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/requirements.lock | 6 + .../charts/postgresql/requirements.yaml | 4 + .../charts/postgresql/templates/NOTES.txt | 54 + .../charts/postgresql/templates/_helpers.tpl | 494 +++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../templates/metrics-configmap.yaml | 13 + .../postgresql/templates/metrics-svc.yaml | 25 + .../postgresql/templates/networkpolicy.yaml | 36 + .../templates/podsecuritypolicy.yaml | 37 + .../postgresql/templates/prometheusrule.yaml | 23 + .../charts/postgresql/templates/pv.yaml | 27 + .../charts/postgresql/templates/role.yaml | 19 + .../postgresql/templates/rolebinding.yaml | 19 + .../charts/postgresql/templates/secrets.yaml | 23 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 340 ++ .../postgresql/templates/statefulset.yaml | 510 +++ .../postgresql/templates/svc-headless.yaml | 18 + .../charts/postgresql/templates/svc-read.yaml | 42 + .../charts/postgresql/templates/svc.yaml | 40 + .../charts/postgresql/values-production.yaml | 591 ++++ .../charts/postgresql/values.schema.json | 103 + .../04-keycloak/charts/postgresql/values.yaml | 604 ++++ .../files/04-keycloak/ci/h2-values.yaml | 38 + .../04-keycloak/ci/postgres-ha-values.yaml | 73 + .../files/04-keycloak/requirements.lock | 6 + .../files/04-keycloak/requirements.yaml | 5 + .../files/04-keycloak/scripts/keycloak.cli | 13 + .../files/04-keycloak/templates/NOTES.txt | 61 + .../files/04-keycloak/templates/_helpers.tpl | 87 + .../templates/configmap-startup.yaml | 14 + .../files/04-keycloak/templates/hpa.yaml | 22 + .../files/04-keycloak/templates/ingress.yaml | 104 + .../04-keycloak/templates/networkpolicy.yaml | 46 + .../templates/poddisruptionbudget.yaml | 13 + .../04-keycloak/templates/prometheusrule.yaml | 24 + 
.../files/04-keycloak/templates/rbac.yaml | 25 + .../files/04-keycloak/templates/route.yaml | 34 + .../files/04-keycloak/templates/secrets.yaml | 29 + .../templates/service-headless.yaml | 18 + .../04-keycloak/templates/service-http.yaml | 59 + .../04-keycloak/templates/serviceaccount.yaml | 19 + .../04-keycloak/templates/servicemonitor.yaml | 39 + .../04-keycloak/templates/statefulset.yaml | 208 ++ .../templates/test/configmap-test.yaml | 50 + .../04-keycloak/templates/test/pod-test.yaml | 43 + .../files/04-keycloak/values.schema.json | 434 +++ .../files/04-keycloak/values.yaml | 552 +++ roles/cmoa_install/files/05-imxc/Chart.yaml | 5 + .../files/05-imxc/cmoa-manual.yaml | 36 + .../files/05-imxc/scripts/init-api-server.sh | 17 + .../files/05-imxc/scripts/init-auth-server.sh | 36 + .../files/05-imxc/scripts/init-noti-server.sh | 14 + .../files/05-imxc/scripts/init-resource.sh | 6 + .../files/05-imxc/scripts/init.json | 2148 ++++++++++++ .../files/05-imxc/templates/auth-server.yaml | 82 + .../05-imxc/templates/cloudmoa-datagate.yaml | 79 + .../templates/cloudmoa-metric-agent.yaml | 331 ++ .../templates/cloudmoa-metric-collector.yaml | 45 + .../templates/cmoa-kube-info-batch.yaml | 38 + .../templates/cmoa-kube-info-connector.yaml | 48 + .../templates/cmoa-kube-info-flat.yaml | 35 + .../files/05-imxc/templates/cmoa-manual.yaml | 36 + .../05-imxc/templates/eureka-server.yaml | 60 + .../05-imxc/templates/imxc-api-server.yaml | 245 ++ .../05-imxc/templates/imxc-collector.yaml | 79 + .../files/05-imxc/templates/noti-server.yaml | 121 + .../files/05-imxc/templates/streams-depl.yaml | 26 + .../05-imxc/templates/topology-agent.yaml | 107 + .../files/05-imxc/templates/zuul-server.yaml | 62 + roles/cmoa_install/files/05-imxc/values.yaml | 157 + .../06-imxc-ui/imxc-ui-jaeger/Chart.yaml | 5 + .../imxc-ui-jaeger/cmoa-manual.yaml | 36 + .../imxc-ui-jaeger/scripts/init-api-server.sh | 16 + .../scripts/init-auth-server.sh | 36 + .../scripts/init-noti-server.sh | 14 + 
.../imxc-ui-jaeger/scripts/init-resource.sh | 6 + .../imxc-ui-jaeger/scripts/init.json | 2148 ++++++++++++ .../templates/imxc-ui-config-jaeger.yaml | 75 + .../templates/imxc-ui-server-jaeger.yaml | 63 + .../06-imxc-ui/imxc-ui-jaeger/values.yaml | 94 + .../files/06-imxc-ui/imxc-ui-jspd/Chart.yaml | 5 + .../imxc-ui-jspd/scripts/init-api-server.sh | 16 + .../imxc-ui-jspd/scripts/init-auth-server.sh | 36 + .../imxc-ui-jspd/scripts/init-noti-server.sh | 14 + .../imxc-ui-jspd/scripts/init-resource.sh | 6 + .../06-imxc-ui/imxc-ui-jspd/scripts/init.json | 2148 ++++++++++++ .../templates/imxc-ui-config.yaml | 44 + .../templates/imxc-ui-server.yaml | 63 + .../files/06-imxc-ui/imxc-ui-jspd/values.yaml | 94 + roles/cmoa_install/files/ip_change | 15 + roles/cmoa_install/files/k8s_status | 86 + roles/cmoa_install/files/postgres_check_data | 6 + roles/cmoa_install/files/rel_change | 15 + .../tasks/00-default-settings-master.yml | 30 + .../tasks/00-default-settings-node.yml | 27 + .../cmoa_install/tasks/01-storage-install.yml | 45 + roles/cmoa_install/tasks/02-base-install.yml | 51 + roles/cmoa_install/tasks/03-ddl-dml.yml | 59 + .../tasks/04-keycloak-install.yml | 34 + roles/cmoa_install/tasks/05-imxc-install.yml | 16 + .../cmoa_install/tasks/06-imxc-ui-install.yml | 112 + .../tasks/07-keycloak-setting.yml | 90 + roles/cmoa_install/tasks/08-finish.yml | 17 + roles/cmoa_install/tasks/helm-install.yml | 60 + roles/cmoa_install/tasks/main.yml | 43 + roles/cmoa_install/templates/realm.json.j2 | 7 + roles/cmoa_install/vars/main.yml | 7 + roles/cmoa_os_setting/README.md | 38 + roles/cmoa_os_setting/defaults/main.yml | 140 + .../files/ingress-nginx/.helmignore | 22 + .../files/ingress-nginx/CHANGELOG.md | 445 +++ .../files/ingress-nginx/Chart.yaml | 23 + .../files/ingress-nginx/OWNERS | 10 + .../files/ingress-nginx/README.md | 494 +++ .../files/ingress-nginx/README.md.gotmpl | 235 ++ .../controller-custom-ingressclass-flags.yaml | 7 + .../ci/daemonset-customconfig-values.yaml | 14 + 
.../ci/daemonset-customnodeport-values.yaml | 22 + .../ci/daemonset-extra-modules.yaml | 10 + .../ci/daemonset-headers-values.yaml | 14 + .../ci/daemonset-internal-lb-values.yaml | 14 + .../ci/daemonset-nodeport-values.yaml | 10 + .../ci/daemonset-podannotations-values.yaml | 17 + ...set-tcp-udp-configMapNamespace-values.yaml | 20 + ...emonset-tcp-udp-portNamePrefix-values.yaml | 18 + .../ci/daemonset-tcp-udp-values.yaml | 16 + .../ci/daemonset-tcp-values.yaml | 14 + .../ci/deamonset-default-values.yaml | 10 + .../ci/deamonset-metrics-values.yaml | 12 + .../ci/deamonset-psp-values.yaml | 13 + .../ci/deamonset-webhook-and-psp-values.yaml | 13 + .../ci/deamonset-webhook-values.yaml | 10 + ...eployment-autoscaling-behavior-values.yaml | 14 + .../ci/deployment-autoscaling-values.yaml | 11 + .../ci/deployment-customconfig-values.yaml | 12 + .../ci/deployment-customnodeport-values.yaml | 20 + .../ci/deployment-default-values.yaml | 8 + .../ci/deployment-extra-modules.yaml | 10 + .../ci/deployment-headers-values.yaml | 13 + .../ci/deployment-internal-lb-values.yaml | 13 + .../ci/deployment-metrics-values.yaml | 11 + .../ci/deployment-nodeport-values.yaml | 9 + .../ci/deployment-podannotations-values.yaml | 16 + .../ci/deployment-psp-values.yaml | 10 + ...ent-tcp-udp-configMapNamespace-values.yaml | 19 + ...loyment-tcp-udp-portNamePrefix-values.yaml | 17 + .../ci/deployment-tcp-udp-values.yaml | 15 + .../ci/deployment-tcp-values.yaml | 11 + .../ci/deployment-webhook-and-psp-values.yaml | 12 + .../deployment-webhook-extraEnvs-values.yaml | 12 + .../deployment-webhook-resources-values.yaml | 23 + .../ci/deployment-webhook-values.yaml | 9 + .../files/ingress-nginx/override-values.yaml | 10 + .../files/ingress-nginx/temp.yaml | 724 ++++ .../files/ingress-nginx/temp2.yaml | 725 ++++ .../files/ingress-nginx/templates/NOTES.txt | 80 + .../ingress-nginx/templates/_helpers.tpl | 185 + .../files/ingress-nginx/templates/_params.tpl | 62 + .../job-patch/clusterrole.yaml | 34 + 
.../job-patch/clusterrolebinding.yaml | 23 + .../job-patch/job-createSecret.yaml | 79 + .../job-patch/job-patchWebhook.yaml | 81 + .../admission-webhooks/job-patch/psp.yaml | 39 + .../admission-webhooks/job-patch/role.yaml | 24 + .../job-patch/rolebinding.yaml | 24 + .../job-patch/serviceaccount.yaml | 16 + .../validating-webhook.yaml | 48 + .../ingress-nginx/templates/clusterrole.yaml | 94 + .../templates/clusterrolebinding.yaml | 19 + .../controller-configmap-addheaders.yaml | 14 + .../controller-configmap-proxyheaders.yaml | 19 + .../templates/controller-configmap-tcp.yaml | 17 + .../templates/controller-configmap-udp.yaml | 17 + .../templates/controller-configmap.yaml | 29 + .../templates/controller-daemonset.yaml | 223 ++ .../templates/controller-deployment.yaml | 228 ++ .../templates/controller-hpa.yaml | 52 + .../templates/controller-ingressclass.yaml | 21 + .../templates/controller-keda.yaml | 42 + .../controller-poddisruptionbudget.yaml | 19 + .../templates/controller-prometheusrules.yaml | 21 + .../templates/controller-psp.yaml | 94 + .../templates/controller-role.yaml | 113 + .../templates/controller-rolebinding.yaml | 21 + .../controller-service-internal.yaml | 79 + .../templates/controller-service-metrics.yaml | 45 + .../templates/controller-service-webhook.yaml | 40 + .../templates/controller-service.yaml | 101 + .../templates/controller-serviceaccount.yaml | 18 + .../templates/controller-servicemonitor.yaml | 48 + .../controller-wehbooks-networkpolicy.yaml | 19 + .../templates/default-backend-deployment.yaml | 118 + .../templates/default-backend-hpa.yaml | 33 + .../default-backend-poddisruptionbudget.yaml | 21 + .../templates/default-backend-psp.yaml | 38 + .../templates/default-backend-role.yaml | 22 + .../default-backend-rolebinding.yaml | 21 + .../templates/default-backend-service.yaml | 41 + .../default-backend-serviceaccount.yaml | 14 + .../templates/dh-param-secret.yaml | 10 + .../files/ingress-nginx/values.yaml | 944 +++++ 
roles/cmoa_os_setting/handlers/main.yml | 10 + roles/cmoa_os_setting/meta/main.yml | 52 + .../tasks/00-centos-os-main.yml | 73 + .../tasks/00-ubuntu-os-main.yml | 71 + .../tasks/01-centos-os-runtime.yml | 45 + .../tasks/01-ubuntu-os-runtime.yml | 78 + roles/cmoa_os_setting/tasks/02-k8s-main.yml | 45 + roles/cmoa_os_setting/tasks/03-k8s-master.yml | 45 + .../tasks/04-k8s-master-yaml.yml | 15 + roles/cmoa_os_setting/tasks/05-k8s-node.yml | 6 + .../tasks/06-worker-directory.yml | 43 + roles/cmoa_os_setting/tasks/main.yml | 19 + .../cmoa_os_setting/templates/config.toml.j2 | 5 + roles/cmoa_os_setting/templates/hosts.j2 | 6 + .../templates/yaml2toml_macro.j2 | 58 + roles/cmoa_os_setting/tests/inventory | 2 + roles/cmoa_os_setting/tests/test.yml | 5 + roles/cmoa_os_setting/vars/main.yml | 2 + ssh_key/authorized_keys.yml | 11 + ssh_key/key_test.sh | 5 + 1022 files changed, 119427 insertions(+) create mode 100644 README.md create mode 100755 ansible.cfg create mode 100755 cmoa_install.yaml create mode 100755 inventory create mode 100644 roles/cmoa_demo_install/defaults/main.yml create mode 100755 roles/cmoa_demo_install/files/00-default/sa_patch.sh create mode 100644 roles/cmoa_demo_install/files/00-default/secret_dockerhub.yaml create mode 100644 roles/cmoa_demo_install/files/00-default/secret_nexus.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/00-storageclass.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/01-persistentvolume.yaml create mode 100755 roles/cmoa_demo_install/files/01-storage/cmoa_minio create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/.helmignore create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/README.md create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/NOTES.txt create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_bucket.txt create mode 100644 
roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_policy.txt create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_user.txt create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_custom_command.txt create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_policy.tpl create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/_helpers.tpl create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/configmap.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/console-ingress.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/console-service.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/deployment.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/gateway-deployment.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/ingress.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/networkpolicy.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-user-job.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-custom-command.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/pvc.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/secrets.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/securitycontextconstraints.yaml create mode 100644 
roles/cmoa_demo_install/files/01-storage/minio/templates/service.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/serviceaccount.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/templates/statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/01-storage/minio/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/00-kafka-broker-config.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/01-coredns.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/analysis/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/analysis/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/analysis/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.lock create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/README.md create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/NOTES.txt create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/_helpers.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configmap.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml 
create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml create mode 
100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/cortex/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/.helmignore create mode 
100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka/1.broker-config.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/2.dns.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/6.outside.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/kafka/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/postgres/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/postgres/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/postgres/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.lock create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/README.md create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/README.md create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/role.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.schema.json create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.lock create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/README.md create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/.helmignore create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/README.md create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/default-values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/NOTES.txt create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/_helpers.tpl create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/headless-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/health-configmap.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/pdb.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/psp.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-pv.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-role.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/secret.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/values.schema.json create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/redis/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/.helmignore create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml create mode 100644 
roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/values.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/index.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/templates/role.yaml create mode 100644 roles/cmoa_demo_install/files/02-base/base/values.yaml create mode 100755 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 
roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/jspd_menumeta.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create 
mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql create mode 100644 roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql create mode 100644 
roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql create mode 100644 roles/cmoa_demo_install/files/04-keycloak/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/OWNERS create mode 100644 roles/cmoa_demo_install/files/04-keycloak/README.md create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/.helmignore create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/README.md create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/README.md create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml create mode 100644 
roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/README.md create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.lock create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml create mode 100644 
roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/pv.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/role.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values-production.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.schema.json create mode 100644 roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/ci/h2-values.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/ci/postgres-ha-values.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/requirements.lock create mode 100644 roles/cmoa_demo_install/files/04-keycloak/requirements.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/scripts/keycloak.cli create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/NOTES.txt create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/_helpers.tpl create 
mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/configmap-startup.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/hpa.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/ingress.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/networkpolicy.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/poddisruptionbudget.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/prometheusrule.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/rbac.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/route.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/secrets.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/service-headless.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/service-http.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/serviceaccount.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/servicemonitor.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/statefulset.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/test/configmap-test.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/templates/test/pod-test.yaml create mode 100644 roles/cmoa_demo_install/files/04-keycloak/values.schema.json create mode 100644 roles/cmoa_demo_install/files/04-keycloak/values.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/cmoa-manual.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/scripts/init-api-server.sh create mode 100644 roles/cmoa_demo_install/files/05-imxc/scripts/init-auth-server.sh create mode 100644 roles/cmoa_demo_install/files/05-imxc/scripts/init-noti-server.sh create mode 100644 
roles/cmoa_demo_install/files/05-imxc/scripts/init-resource.sh create mode 100644 roles/cmoa_demo_install/files/05-imxc/scripts/init.json create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/auth-server.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-datagate.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/cmoa-manual.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/eureka-server.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/imxc-api-server.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/imxc-collector.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/noti-server.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/streams-depl.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/topology-agent.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/templates/zuul-server.yaml create mode 100644 roles/cmoa_demo_install/files/05-imxc/values.yaml create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh create mode 100644 
roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml create mode 100644 roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml create mode 100755 roles/cmoa_demo_install/files/ip_change create mode 100755 roles/cmoa_demo_install/files/k8s_status create mode 100755 roles/cmoa_demo_install/files/postgres_check_data create mode 100755 roles/cmoa_demo_install/files/rel_change create mode 100644 roles/cmoa_demo_install/tasks/00-default-settings-master.yml create mode 100644 roles/cmoa_demo_install/tasks/00-default-settings-node.yml create mode 100644 roles/cmoa_demo_install/tasks/01-storage-install.yml create mode 100644 
roles/cmoa_demo_install/tasks/02-base-install.yml create mode 100644 roles/cmoa_demo_install/tasks/03-ddl-dml.yml create mode 100644 roles/cmoa_demo_install/tasks/04-keycloak-install.yml create mode 100644 roles/cmoa_demo_install/tasks/05-imxc-install.yml create mode 100644 roles/cmoa_demo_install/tasks/06-imxc-ui-install.yml create mode 100644 roles/cmoa_demo_install/tasks/07-keycloak-setting.yml create mode 100644 roles/cmoa_demo_install/tasks/08-finish.yml create mode 100644 roles/cmoa_demo_install/tasks/helm-install.yml create mode 100644 roles/cmoa_demo_install/tasks/main.yml create mode 100644 roles/cmoa_demo_install/templates/realm.json.j2 create mode 100644 roles/cmoa_demo_install/vars/main.yml create mode 100644 roles/cmoa_install/defaults/main.yml create mode 100755 roles/cmoa_install/files/00-default/sa_patch.sh create mode 100644 roles/cmoa_install/files/00-default/secret_dockerhub.yaml create mode 100644 roles/cmoa_install/files/00-default/secret_nexus.yaml create mode 100644 roles/cmoa_install/files/01-storage/00-storageclass.yaml create mode 100644 roles/cmoa_install/files/01-storage/01-persistentvolume.yaml create mode 100755 roles/cmoa_install/files/01-storage/cmoa_minio create mode 100644 roles/cmoa_install/files/01-storage/minio/.helmignore create mode 100644 roles/cmoa_install/files/01-storage/minio/Chart.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/README.md create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/NOTES.txt create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/_helper_create_bucket.txt create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/_helper_create_policy.txt create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/_helper_create_user.txt create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/_helper_custom_command.txt create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/_helper_policy.tpl create mode 100644 
roles/cmoa_install/files/01-storage/minio/templates/_helpers.tpl create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/configmap.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/console-ingress.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/console-service.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/deployment.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/gateway-deployment.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/ingress.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/networkpolicy.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/post-install-create-user-job.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/post-install-custom-command.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/pvc.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/secrets.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/securitycontextconstraints.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/service.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/serviceaccount.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/servicemonitor.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/templates/statefulset.yaml create mode 100644 roles/cmoa_install/files/01-storage/minio/values.yaml create mode 100644 roles/cmoa_install/files/02-base/00-kafka-broker-config.yaml create mode 
100644 roles/cmoa_install/files/02-base/01-coredns.yaml create mode 100644 roles/cmoa_install/files/02-base/base/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/analysis/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/analysis/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/analysis/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/Chart.lock create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/README.md create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/NOTES.txt create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/_helpers.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml create mode 
100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/configmap.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl create mode 100644 
roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml create mode 100644 
roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml create mode 100644 
roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/cortex/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/Chart.yaml create mode 100644 
roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/elasticsearch/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka-manager/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka-manager/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka-manager/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka/1.broker-config.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka/templates/2.dns.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml create mode 100644 
roles/cmoa_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka/templates/6.outside.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/kafka/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/postgres/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/postgres/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/postgres/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.lock create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/README.md create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/README.md create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl create mode 100644 
roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml create mode 100644 
roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/role.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml create mode 100644 
roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.schema.json create mode 100644 roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/Chart.lock create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/README.md create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/README.md create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl create mode 100644 
roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/charts/common/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/ci/default-values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/NOTES.txt create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/_helpers.tpl create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/headless-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/health-configmap.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml create mode 100644 
roles/cmoa_install/files/02-base/base/charts/redis/templates/pdb.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/psp.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-pv.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-role.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/templates/secret.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/values.schema.json create mode 100644 roles/cmoa_install/files/02-base/base/charts/redis/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/zookeeper/.helmignore create mode 100644 roles/cmoa_install/files/02-base/base/charts/zookeeper/Chart.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml create mode 100644 
roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml create mode 100644 roles/cmoa_install/files/02-base/base/charts/zookeeper/values.yaml create mode 100644 roles/cmoa_install/files/02-base/base/index.yaml create mode 100644 roles/cmoa_install/files/02-base/base/templates/role.yaml create mode 100644 roles/cmoa_install/files/02-base/base/values.yaml create mode 100755 roles/cmoa_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh create mode 100644 
roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/jspd_menumeta.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh create mode 100644 
roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql create mode 100644 
roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql create mode 100644 roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql create mode 100644 roles/cmoa_install/files/04-keycloak/Chart.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/OWNERS create mode 100644 roles/cmoa_install/files/04-keycloak/README.md create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/.helmignore create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/Chart.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/README.md create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/README.md create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl create mode 100644 
roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/files/README.md create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.lock create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml create mode 100644 
roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/pv.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/role.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/values-production.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/values.schema.json create mode 100644 roles/cmoa_install/files/04-keycloak/charts/postgresql/values.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/ci/h2-values.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/ci/postgres-ha-values.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/requirements.lock create mode 100644 roles/cmoa_install/files/04-keycloak/requirements.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/scripts/keycloak.cli create mode 100644 roles/cmoa_install/files/04-keycloak/templates/NOTES.txt create mode 100644 roles/cmoa_install/files/04-keycloak/templates/_helpers.tpl create mode 100644 roles/cmoa_install/files/04-keycloak/templates/configmap-startup.yaml create mode 100644 
roles/cmoa_install/files/04-keycloak/templates/hpa.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/ingress.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/networkpolicy.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/poddisruptionbudget.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/prometheusrule.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/rbac.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/route.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/secrets.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/service-headless.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/service-http.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/serviceaccount.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/servicemonitor.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/statefulset.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/test/configmap-test.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/templates/test/pod-test.yaml create mode 100644 roles/cmoa_install/files/04-keycloak/values.schema.json create mode 100644 roles/cmoa_install/files/04-keycloak/values.yaml create mode 100644 roles/cmoa_install/files/05-imxc/Chart.yaml create mode 100644 roles/cmoa_install/files/05-imxc/cmoa-manual.yaml create mode 100644 roles/cmoa_install/files/05-imxc/scripts/init-api-server.sh create mode 100644 roles/cmoa_install/files/05-imxc/scripts/init-auth-server.sh create mode 100644 roles/cmoa_install/files/05-imxc/scripts/init-noti-server.sh create mode 100644 roles/cmoa_install/files/05-imxc/scripts/init-resource.sh create mode 100644 roles/cmoa_install/files/05-imxc/scripts/init.json create mode 100644 roles/cmoa_install/files/05-imxc/templates/auth-server.yaml create mode 100644 
roles/cmoa_install/files/05-imxc/templates/cloudmoa-datagate.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/cmoa-manual.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/eureka-server.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/imxc-api-server.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/imxc-collector.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/noti-server.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/streams-depl.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/topology-agent.yaml create mode 100644 roles/cmoa_install/files/05-imxc/templates/zuul-server.yaml create mode 100644 roles/cmoa_install/files/05-imxc/values.yaml create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml create mode 100644 
roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml create mode 100644 roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml create mode 100755 roles/cmoa_install/files/ip_change create mode 100755 roles/cmoa_install/files/k8s_status create mode 100755 roles/cmoa_install/files/postgres_check_data create mode 100755 roles/cmoa_install/files/rel_change create mode 100644 roles/cmoa_install/tasks/00-default-settings-master.yml create mode 100644 roles/cmoa_install/tasks/00-default-settings-node.yml create mode 100644 roles/cmoa_install/tasks/01-storage-install.yml create mode 100644 roles/cmoa_install/tasks/02-base-install.yml create mode 100644 roles/cmoa_install/tasks/03-ddl-dml.yml create mode 100644 roles/cmoa_install/tasks/04-keycloak-install.yml create mode 100644 roles/cmoa_install/tasks/05-imxc-install.yml create mode 100644 roles/cmoa_install/tasks/06-imxc-ui-install.yml create mode 100644 roles/cmoa_install/tasks/07-keycloak-setting.yml create mode 100644 roles/cmoa_install/tasks/08-finish.yml create mode 100644 roles/cmoa_install/tasks/helm-install.yml create mode 100644 roles/cmoa_install/tasks/main.yml 
create mode 100644 roles/cmoa_install/templates/realm.json.j2 create mode 100644 roles/cmoa_install/vars/main.yml create mode 100644 roles/cmoa_os_setting/README.md create mode 100644 roles/cmoa_os_setting/defaults/main.yml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/.helmignore create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/CHANGELOG.md create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/Chart.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/OWNERS create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/README.md create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/README.md.gotmpl create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml create mode 100644 
roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml create mode 100644 
roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/override-values.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/temp.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/temp2.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/NOTES.txt create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/_helpers.tpl create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/_params.tpl create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml create mode 100644 
roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrole.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-deployment.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-hpa.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-keda.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-psp.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-role.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml create mode 100644 
roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-role.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-service.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml create mode 100644 roles/cmoa_os_setting/files/ingress-nginx/values.yaml create mode 100644 roles/cmoa_os_setting/handlers/main.yml create mode 100644 roles/cmoa_os_setting/meta/main.yml create mode 100644 roles/cmoa_os_setting/tasks/00-centos-os-main.yml create mode 100644 roles/cmoa_os_setting/tasks/00-ubuntu-os-main.yml create mode 100644 roles/cmoa_os_setting/tasks/01-centos-os-runtime.yml create mode 100644 roles/cmoa_os_setting/tasks/01-ubuntu-os-runtime.yml create mode 100644 roles/cmoa_os_setting/tasks/02-k8s-main.yml create mode 100644 roles/cmoa_os_setting/tasks/03-k8s-master.yml create mode 100644 roles/cmoa_os_setting/tasks/04-k8s-master-yaml.yml create mode 100644 
roles/cmoa_os_setting/tasks/05-k8s-node.yml create mode 100644 roles/cmoa_os_setting/tasks/06-worker-directory.yml create mode 100644 roles/cmoa_os_setting/tasks/main.yml create mode 100644 roles/cmoa_os_setting/templates/config.toml.j2 create mode 100644 roles/cmoa_os_setting/templates/hosts.j2 create mode 100644 roles/cmoa_os_setting/templates/yaml2toml_macro.j2 create mode 100644 roles/cmoa_os_setting/tests/inventory create mode 100644 roles/cmoa_os_setting/tests/test.yml create mode 100644 roles/cmoa_os_setting/vars/main.yml create mode 100644 ssh_key/authorized_keys.yml create mode 100755 ssh_key/key_test.sh diff --git a/README.md b/README.md new file mode 100644 index 0000000..f7dc5d2 --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# ansible installer + + diff --git a/ansible.cfg b/ansible.cfg new file mode 100755 index 0000000..fca76ad --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,5 @@ +[defaults] +inventory = inventory +roles_path = roles +deprecation_warnings = False +display_skipped_hosts = no diff --git a/cmoa_install.yaml b/cmoa_install.yaml new file mode 100755 index 0000000..a6427a7 --- /dev/null +++ b/cmoa_install.yaml @@ -0,0 +1,13 @@ +--- +- hosts: cluster + become: true + gather_facts: true + environment: + KUBECONFIG: /root/.kube/ansible_config + roles: + - role: cmoa_os_setting + - role: cmoa_install + delegate_to: 127.0.0.1 + - role: cmoa_demo_install + delegate_to: 127.0.0.1 + diff --git a/inventory b/inventory new file mode 100755 index 0000000..ffc1423 --- /dev/null +++ b/inventory @@ -0,0 +1,22 @@ +[master] +10.10.10.10 + +[worker1] +10.10.10.11 + +[worker2] +10.10.10.12 + +[cluster:children] +master +worker1 +worker2 + +[master:vars] +kubernetes_role="master" + +[worker1:vars] +kubernetes_role="node" + +[worker2:vars] +kubernetes_role="node" diff --git a/roles/cmoa_demo_install/defaults/main.yml b/roles/cmoa_demo_install/defaults/main.yml new file mode 100644 index 0000000..11b9651 --- /dev/null +++ b/roles/cmoa_demo_install/defaults/main.yml 
@@ -0,0 +1,64 @@ +# helm file install +helm_checksum: sha256:950439759ece902157cf915b209b8d694e6f675eaab5099fb7894f30eeaee9a2 +helm_version: v3.10.3 + +# cmoa info +cmoa_namespace: imxc +cmoa_version: rel3.4.8 + +# default ip/version (not change) +before_ip: 111.111.111.111 +before_version: rel0.0.0 + +# files/00-default in role +docker_secret_file: secret_nexus.yaml + +# all, jaeger, jspd +imxc_ui: all + +# [docker_config_path] +docker_config_nexus: dockerconfig/docker_config_nexus.json + +# [jaeger] +jaeger_servicename: imxc-ui-service-jaeger +jaeger_service_port: 80 +jaeger_nodePort: 31080 # only imxc-ui-jaeger option (imxc-ui-jaeger template default port=31084) + +# [minio] +minio_service_name: minio +minio_service_port: 9000 +minio_nodePort: 32002 +minio_user: cloudmoa +minio_pass: admin1234 +bucket_name: cortex-bucket +days: 42 +rule_id: cloudmoa + +# [Elasticsearch] +elasticsearch_service_name: elasticsearch +elasticsearch_service_port: 9200 +elasticsearch_nodePort: 30200 + +# [Keycloak] +# Keycloak configuration settings +keycloak_http_port: 31082 +keycloak_https_port: 8443 +keycloak_management_http_port: 31990 +keycloak_realm: exem + +# Keycloak administration console user +keycloak_admin_user: admin +keycloak_admin_password: admin +keycloak_auth_realm: master +keycloak_auth_client: admin-cli +keycloak_context: /auth + +# keycloak_clients +keycloak_clients: + - name: 'authorization_server' + client_id: authorization_server + realm: exem + redirect_uris: "http://10.10.30.75:31080/*,http://10.10.30.75:31084/*,http://localhost:8080/*,http://localhost:8081/*" + public_client: True + + diff --git a/roles/cmoa_demo_install/files/00-default/sa_patch.sh b/roles/cmoa_demo_install/files/00-default/sa_patch.sh new file mode 100755 index 0000000..618a35b --- /dev/null +++ b/roles/cmoa_demo_install/files/00-default/sa_patch.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +export KUBECONFIG=$1 + +kubectl wait node --for=condition=ready --all --timeout=60s + +#kubectl -n imxc patch sa 
default -p '{"imagePullSecrets": [{"name": "regcred"}]}' +kubectl -n default patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' diff --git a/roles/cmoa_demo_install/files/00-default/secret_dockerhub.yaml b/roles/cmoa_demo_install/files/00-default/secret_dockerhub.yaml new file mode 100644 index 0000000..268027b --- /dev/null +++ b/roles/cmoa_demo_install/files/00-default/secret_dockerhub.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: regcred +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOiB7CiAgICAgICJhdXRoIjogIlpYaGxiV1JsZGpJNk0yWXlObVV6T0RjdFlqY3paQzAwTkRVMUxUazNaRFV0T1dWaU9EWmtObVl4WXpOayIKICAgIH0KICB9Cn0KCg== +type: kubernetes.io/dockerconfigjson diff --git a/roles/cmoa_demo_install/files/00-default/secret_nexus.yaml b/roles/cmoa_demo_install/files/00-default/secret_nexus.yaml new file mode 100644 index 0000000..6a2543f --- /dev/null +++ b/roles/cmoa_demo_install/files/00-default/secret_nexus.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICIxMC4xMC4zMS4yNDM6NTAwMCI6IHsKICAgICAgImF1dGgiOiAiWTI5eVpUcGpiM0psWVdSdGFXNHhNak0wIgogICAgfQogIH0KfQoK +kind: Secret +metadata: + name: regcred +type: kubernetes.io/dockerconfigjson + diff --git a/roles/cmoa_demo_install/files/01-storage/00-storageclass.yaml b/roles/cmoa_demo_install/files/01-storage/00-storageclass.yaml new file mode 100644 index 0000000..8f41292 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/00-storageclass.yaml @@ -0,0 +1,6 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: exem-local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer diff --git a/roles/cmoa_demo_install/files/01-storage/01-persistentvolume.yaml b/roles/cmoa_demo_install/files/01-storage/01-persistentvolume.yaml new file mode 100644 index 0000000..1bd4546 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/01-persistentvolume.yaml 
@@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-0 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv1 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-1 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv2 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-2 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv3 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-3 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv4 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 diff --git a/roles/cmoa_demo_install/files/01-storage/cmoa_minio b/roles/cmoa_demo_install/files/01-storage/cmoa_minio new file mode 100755 index 0000000..522b87d --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/cmoa_minio @@ -0,0 +1,63 @@ +#! 
/usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, time, urllib3 +from minio import Minio +from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule, Transition +from minio.commonconfig import ENABLED, Filter + +def minio_conn(ipaddr, portnum, ac_key, sec_key): + conn='{}:{}'.format(ipaddr,portnum) + url='http://{}'.format(conn) + print(url) + minio_client = Minio( + conn, access_key=ac_key, secret_key=sec_key, secure=False, + http_client=urllib3.ProxyManager( + url, timeout=urllib3.Timeout.DEFAULT_TIMEOUT, + retries=urllib3.Retry( + total=5, backoff_factor=0.2, + status_forcelist=[ + 500, 502, 503, 504 + ], + ), + ), + ) + + return minio_client + +def minio_create_buckets(minio_client, bucket_name, days, rule_id="cloudmoa"): + config = LifecycleConfig( + [ + Rule( + ENABLED, + rule_filter=Filter(prefix=""), + rule_id=rule_id, + expiration=Expiration(days=days), + ), + ], + ) + minio_client.set_bucket_lifecycle(bucket_name, config) + +def minio_delete_bucket(client, bucket_name): + client.delete_bucket_lifecycle(bucket_name) + +def main(): + s3_url = os.sys.argv[1].split(':')[0] + s3_url_port = os.sys.argv[1].split(':')[1] + minio_user = os.sys.argv[2] + minio_pass = os.sys.argv[3] + bucket_name = os.sys.argv[4] + minio_days = os.sys.argv[5] + rule_id = os.sys.argv[6] + + print(s3_url, s3_url_port, minio_user, minio_pass) + + minio_client=minio_conn(s3_url, s3_url_port, minio_user, minio_pass) + minio_create_buckets(minio_client, bucket_name, minio_days, rule_id) + +if __name__ == "__main__": + try: + main() + except Exception as err: + print("[Usage] minio {url:port} {username} {password} {bucketName} {days} {ruleId}") + print(err) \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/01-storage/minio/.helmignore b/roles/cmoa_demo_install/files/01-storage/minio/.helmignore new file mode 100644 index 0000000..a9fe727 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/.helmignore @@ -0,0 +1,23 @@ +# Patterns to 
ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/01-storage/minio/Chart.yaml b/roles/cmoa_demo_install/files/01-storage/minio/Chart.yaml new file mode 100644 index 0000000..fc21076 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +description: Multi-Cloud Object Storage +name: minio +version: 4.0.2 +appVersion: RELEASE.2022-05-08T23-50-31Z +keywords: + - minio + - storage + - object-storage + - s3 + - cluster +home: https://min.io +icon: https://min.io/resources/img/logo/MINIO_wordmark.png +sources: +- https://github.com/minio/minio +maintainers: +- name: MinIO, Inc + email: dev@minio.io diff --git a/roles/cmoa_demo_install/files/01-storage/minio/README.md b/roles/cmoa_demo_install/files/01-storage/minio/README.md new file mode 100644 index 0000000..ad3eb7d --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/README.md @@ -0,0 +1,235 @@ +# MinIO Helm Chart + +[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE) + +MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. 
+ +For more detailed documentation please visit [here](https://docs.minio.io/) + +## Introduction + +This chart bootstraps MinIO Cluster on [Kubernetes](http://kubernetes.io) using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Helm cli with Kubernetes cluster configured. +- PV provisioner support in the underlying infrastructure. (We recommend using ) +- Use Kubernetes version v1.19 and later for best experience. + +## Configure MinIO Helm repo + +```bash +helm repo add minio https://charts.min.io/ +``` + +### Installing the Chart + +Install this chart using: + +```bash +helm install --namespace minio --set rootUser=rootuser,rootPassword=rootpass123 --generate-name minio/minio +``` + +The command deploys MinIO on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +### Upgrading the Chart + +You can use Helm to update MinIO version in a live release. Assuming your release is named as `my-release`, get the values using the command: + +```bash +helm get values my-release > old_values.yaml +``` + +Then change the field `image.tag` in `old_values.yaml` file with MinIO image tag you want to use. Now update the chart using + +```bash +helm upgrade -f old_values.yaml my-release minio/minio +``` + +Default upgrade strategies are specified in the `values.yaml` file. Update these fields if you'd like to use a different strategy. + +### Configuration + +Refer the [Values file](./values.yaml) for all the possible config fields. + +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +helm install --name my-release --set persistence.size=1Ti minio/minio +``` + +The above command deploys MinIO server with a 1Ti backing persistent volume. + +Alternately, you can provide a YAML file that specifies parameter values while installing the chart. 
For example, + +```bash +helm install --name my-release -f values.yaml minio/minio +``` + +### Persistence + +This chart provisions a PersistentVolumeClaim and mounts corresponding persistent volume to default location `/export`. You'll need physical storage available in the Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable PersistentVolumeClaim by: + +```bash +helm install --set persistence.enabled=false minio/minio +``` + +> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."* + +### Existing PersistentVolumeClaim + +If a Persistent Volume Claim already exists, specify it during installation. + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +helm install --set persistence.existingClaim=PVC_NAME minio/minio +``` + +### NetworkPolicy + +To enable network policy for MinIO, +install [a networking plugin that implements the Kubernetes +NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for *all* pods in the namespace: + +``` +kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 9000. + +For more precise policy, set `networkPolicy.allowExternal=true`. This will +only allow pods with the generated client label to connect to MinIO. +This label will be displayed in the output of a successful install. 
+ +### Existing secret + +Instead of having this chart create the secret for you, you can supply a preexisting secret, much +like an existing PersistentVolumeClaim. + +First, create the secret: + +```bash +kubectl create secret generic my-minio-secret --from-literal=rootUser=foobarbaz --from-literal=rootPassword=foobarbazqux +``` + +Then install the chart, specifying that you want to use an existing secret: + +```bash +helm install --set existingSecret=my-minio-secret minio/minio +``` + +The following fields are expected in the secret: + +| .data.\ in Secret | Corresponding variable | Description | Required | +|:------------------------|:-----------------------|:---------------|:---------| +| `rootUser` | `rootUser` | Root user. | yes | +| `rootPassword` | `rootPassword` | Root password. | yes | + +All corresponding variables will be ignored in values file. + +### Configure TLS + +To enable TLS for MinIO containers, acquire TLS certificates from a CA or create self-signed certificates. While creating / acquiring certificates ensure the corresponding domain names are set as per the standard [DNS naming conventions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-identity) in a Kubernetes StatefulSet (for a distributed MinIO setup). Then create a secret using + +```bash +kubectl create secret generic tls-ssl-minio --from-file=path/to/private.key --from-file=path/to/public.crt +``` + +Then install the chart, specifying that you want to use the TLS secret: + +```bash +helm install --set tls.enabled=true,tls.certSecret=tls-ssl-minio minio/minio +``` + +### Installing certificates from third party CAs + +MinIO can connect to other servers, including MinIO nodes or other server types such as NATs and Redis. If these servers use certificates that were not registered with a known CA, add trust for these certificates to MinIO Server by bundling these certificates into a Kubernetes secret and providing it to Helm via the `trustedCertsSecret` value. 
If `.Values.tls.enabled` is `true` and you're installing certificates for third party CAs, remember to include MinIO's own certificate with key `public.crt`, if it also needs to be trusted. + +For instance, given that TLS is enabled and you need to add trust for MinIO's own CA and for the CA of a Keycloak server, a Kubernetes secret can be created from the certificate files using `kubectl`: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=public.crt --from-file=keycloak.crt +``` + +If TLS is not enabled, you would need only the third party CA: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=keycloak.crt +``` + +The name of the generated secret can then be passed to Helm using a values file or the `--set` parameter: + +``` +trustedCertsSecret: "minio-trusted-certs" + +or + +--set trustedCertsSecret=minio-trusted-certs +``` + +### Create buckets after install + +Install the chart, specifying the buckets you want to create after install: + +```bash +helm install --set buckets[0].name=bucket1,buckets[0].policy=none,buckets[0].purge=false minio/minio +``` + +Description of the configuration parameters used above - + +- `buckets[].name` - name of the bucket to create, must be a string with length > 0 +- `buckets[].policy` - can be one of none|download|upload|public +- `buckets[].purge` - purge if bucket exists already + +33# Create policies after install +Install the chart, specifying the policies you want to create after install: + +```bash +helm install --set policies[0].name=mypolicy,policies[0].statements[0].resources[0]='arn:aws:s3:::bucket1',policies[0].statements[0].actions[0]='s3:ListBucket',policies[0].statements[0].actions[1]='s3:GetObject' minio/minio +``` + +Description of the configuration parameters used above - + +- `policies[].name` - name of the policy to create, must be a string with length > 0 +- `policies[].statements[]` - list of statements, includes actions and resources +- 
`policies[].statements[].resources[]` - list of resources that applies the statement +- `policies[].statements[].actions[]` - list of actions granted + +### Create user after install + +Install the chart, specifying the users you want to create after install: + +```bash +helm install --set users[0].accessKey=accessKey,users[0].secretKey=secretKey,users[0].policy=none,users[1].accessKey=accessKey2,users[1].secretRef=existingSecret,users[1].secretKey=password,users[1].policy=none minio/minio +``` + +Description of the configuration parameters used above - + +- `users[].accessKey` - accessKey of user +- `users[].secretKey` - secretKey of usersecretRef +- `users[].existingSecret` - secret name that contains the secretKey of user +- `users[].existingSecretKey` - data key in existingSecret secret containing the secretKey +- `users[].policy` - name of the policy to assign to user + +## Uninstalling the Chart + +Assuming your release is named as `my-release`, delete it using the command: + +```bash +helm delete my-release +``` + +or + +```bash +helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/NOTES.txt b/roles/cmoa_demo_install/files/01-storage/minio/templates/NOTES.txt new file mode 100644 index 0000000..9337196 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/NOTES.txt @@ -0,0 +1,43 @@ +{{- if eq .Values.service.type "ClusterIP" "NodePort" }} +MinIO can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: +{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +To access MinIO from localhost, run the below commands: + + 1. export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + + 2. 
kubectl port-forward $POD_NAME 9000 --namespace {{ .Release.Namespace }} + +Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ + +You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . }}-local + +{{- end }} +{{- if eq .Values.service.type "LoadBalancer" }} +MinIO can be accessed via port {{ .Values.service.port }} on an external IP address. Get the service external IP address by: +kubectl get svc --namespace {{ .Release.Namespace }} -l app={{ template "minio.fullname" . }} + +Note that the public IP may take a couple of minutes to be available. + +You can now access MinIO server on http://:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . 
}} + +Alternately, you can use your browser or the MinIO SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "minio.fullname" . }}-client=true" +will be able to connect to this minio cluster. +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_bucket.txt b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_bucket.txt new file mode 100644 index 0000000..35a48fc --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_bucket.txt @@ -0,0 +1,109 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? 
; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkBucketExists ($bucket) +# Check if the bucket exists, by using the exit code of `mc ls` +checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} ls myminio/$BUCKET > /dev/null 2>&1) + return $? +} + +# createBucket ($bucket, $policy, $purge) +# Ensure bucket exists, purging if asked to +createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." + set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist + if ! checkBucketExists $BUCKET ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + + + # set versioning for bucket + if [ ! -z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} policy set $POLICY myminio/$BUCKET +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.buckets }} +{{ $global := . 
}} +# Create the buckets +{{- range .Values.buckets }} +createBucket {{ tpl .name $global }} {{ .policy }} {{ .purge }} {{ .versioning }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_policy.txt b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_policy.txt new file mode 100644 index 0000000..d565b16 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_policy.txt @@ -0,0 +1,75 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkPolicyExists ($policy) +# Check if the policy exists, by using the exit code of `mc admin policy info` +checkPolicyExists() { + POLICY=$1 + CMD=$(${MC} admin policy info myminio $POLICY > /dev/null 2>&1) + return $? 
+} + +# createPolicy($name, $filename) +createPolicy () { + NAME=$1 + FILENAME=$2 + + # Create the name if it does not exist + echo "Checking policy: $NAME (in /config/$FILENAME.json)" + if ! checkPolicyExists $NAME ; then + echo "Creating policy '$NAME'" + else + echo "Policy '$NAME' already exists." + fi + ${MC} admin policy add myminio $NAME /config/$FILENAME.json + +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.policies }} +# Create the policies +{{- range $idx, $policy := .Values.policies }} +createPolicy {{ $policy.name }} policy_{{ $idx }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_user.txt b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_user.txt new file mode 100644 index 0000000..7771428 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_create_user.txt @@ -0,0 +1,88 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? 
; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkUserExists ($username) +# Check if the user exists, by using the exit code of `mc admin user info` +checkUserExists() { + USER=$1 + CMD=$(${MC} admin user info myminio $USER > /dev/null 2>&1) + return $? +} + +# createUser ($username, $password, $policy) +createUser() { + USER=$1 + PASS=$2 + POLICY=$3 + + # Create the user if it does not exist + if ! checkUserExists $USER ; then + echo "Creating user '$USER'" + ${MC} admin user add myminio $USER $PASS + else + echo "User '$USER' already exists." + fi + + + # set policy for user + if [ ! -z $POLICY -a $POLICY != " " ] ; then + echo "Adding policy '$POLICY' for '$USER'" + ${MC} admin policy set myminio $POLICY user=$USER + else + echo "User '$USER' has no policy attached." + fi +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.users }} +{{ $global := . }} +# Create the users +{{- range .Values.users }} +{{- if .existingSecret }} +createUser {{ tpl .accessKey $global }} $(cat /config/secrets/{{ tpl .accessKey $global }}) {{ .policy }} +{{ else }} +createUser {{ tpl .accessKey $global }} {{ .secretKey }} {{ .policy }} +{{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_custom_command.txt b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_custom_command.txt new file mode 100644 index 0000000..b583a77 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_custom_command.txt @@ -0,0 +1,58 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. 
+ +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# runCommand ($@) +# Run custom mc command +runCommand() { + ${MC} "$@" + return $? 
+} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.customCommands }} +# Run custom commands +{{- range .Values.customCommands }} +runCommand {{ .command }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_policy.tpl b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_policy.tpl new file mode 100644 index 0000000..83a2e15 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helper_policy.tpl @@ -0,0 +1,18 @@ +{{- $statements_length := len .statements -}} +{{- $statements_length := sub $statements_length 1 -}} +{ + "Version": "2012-10-17", + "Statement": [ +{{- range $i, $statement := .statements }} + { + "Effect": "Allow", + "Action": [ +"{{ $statement.actions | join "\",\n\"" }}" + ]{{ if $statement.resources }}, + "Resource": [ +"{{ $statement.resources | join "\",\n\"" }}" + ]{{ end }} + }{{ if lt $i $statements_length }},{{end }} +{{- end }} + ] +} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/_helpers.tpl b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helpers.tpl new file mode 100644 index 0000000..4e38194 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/_helpers.tpl @@ -0,0 +1,218 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "minio.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.Version -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare ">=1.7-0, <1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else if semverCompare "^1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "minio.deployment.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "minio.statefulset.apiVersion" -}} +{{- if semverCompare "<1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "minio.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for console ingress. +*/}} +{{- define "minio.consoleIngress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Determine secret name. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.existingSecret -}} +{{- .Values.existingSecret }} +{{- else -}} +{{- include "minio.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Determine name for scc role and rolebinding +*/}} +{{- define "minio.sccRoleName" -}} +{{- printf "%s-%s" "scc" (include "minio.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Properly format optional additional arguments to MinIO binary +*/}} +{{- define "minio.extraArgs" -}} +{{- range .Values.extraArgs -}} +{{ " " }}{{ . }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- end -}} + +{{/* +Formats volumeMount for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolumeMount" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + mountPath: {{ .Values.certsPath }} +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $casPath := printf "%s/CAs" .Values.certsPath | clean }} +- name: trusted-cert-secret-volume + mountPath: {{ $casPath }} +{{- end }} +{{- end -}} + +{{/* +Formats volume for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolume" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: public.crt + - key: {{ .Values.tls.privateKey }} + path: private.key +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $certSecret := eq .Values.trustedCertsSecret "" | ternary .Values.tls.certSecret .Values.trustedCertsSecret }} +{{- $publicCrt := eq .Values.trustedCertsSecret "" | ternary .Values.tls.publicCrt "" }} +- name: trusted-cert-secret-volume + secret: + secretName: {{ $certSecret }} + {{- if ne $publicCrt "" }} + items: + - key: {{ $publicCrt }} + path: public.crt + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. 
+*/}} +{{- define "minio.getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{- define "minio.root.username" -}} + {{- if .Values.rootUser }} + {{- .Values.rootUser | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 20 "Key" "rootUser") }} + {{- end }} +{{- end -}} + +{{- define "minio.root.password" -}} + {{- if .Values.rootPassword }} + {{- .Values.rootPassword | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 40 "Key" "rootPassword") }} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/configmap.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/configmap.yaml new file mode 100644 index 0000000..95a7c60 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/configmap.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + initialize: |- +{{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . | indent 4 }} + add-user: |- +{{ include (print $.Template.BasePath "/_helper_create_user.txt") . | indent 4 }} + add-policy: |- +{{ include (print $.Template.BasePath "/_helper_create_policy.txt") . | indent 4 }} +{{- range $idx, $policy := .Values.policies }} + # {{ $policy.name }} + policy_{{ $idx }}.json: |- +{{ include (print $.Template.BasePath "/_helper_policy.tpl") . 
| indent 4 }} +{{ end }} + custom-command: |- +{{ include (print $.Template.BasePath "/_helper_custom_command.txt") . | indent 4 }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/console-ingress.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/console-ingress.yaml new file mode 100644 index 0000000..2ce9a93 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/console-ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.consoleIngress.enabled -}} +{{- $fullName := printf "%s-console" (include "minio.fullname" .) -}} +{{- $servicePort := .Values.consoleService.port -}} +{{- $ingressPath := .Values.consoleIngress.path -}} +apiVersion: {{ template "minio.consoleIngress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.consoleIngress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.consoleIngress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.consoleIngress.ingressClassName }} + ingressClassName: {{ .Values.consoleIngress.ingressClassName }} +{{- end }} +{{- if .Values.consoleIngress.tls }} + tls: + {{- range .Values.consoleIngress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.consoleIngress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . 
| quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/console-service.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/console-service.yaml new file mode 100644 index 0000000..f4b1294 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/console-service.yaml @@ -0,0 +1,48 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-console + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.consoleService.annotations }} + annotations: +{{ toYaml .Values.consoleService.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.consoleService.type "ClusterIP" "") (empty .Values.consoleService.type)) }} + type: ClusterIP + {{- if not (empty .Values.consoleService.clusterIP) }} + clusterIP: {{ .Values.consoleService.clusterIP }} + {{end}} +{{- else if eq .Values.consoleService.type "LoadBalancer" }} + type: {{ .Values.consoleService.type }} + loadBalancerIP: {{ default "" .Values.consoleService.loadBalancerIP }} +{{- else }} + type: {{ .Values.consoleService.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.consoleService.port }} + protocol: TCP +{{- if (and (eq .Values.consoleService.type "NodePort") ( .Values.consoleService.nodePort)) }} + nodePort: {{ .Values.consoleService.nodePort }} +{{- else }} + targetPort: {{ .Values.consoleService.port }} +{{- end}} +{{- if .Values.consoleService.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.consoleService.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . 
}} + release: {{ .Release.Name }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/deployment.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/deployment.yaml new file mode 100644 index 0000000..a06bc35 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/deployment.yaml @@ -0,0 +1,174 @@ +{{- if eq .Values.mode "standalone" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: 1 + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio server {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }}" + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" .
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client_cert.pem" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client_cert_key.pem" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/gateway-deployment.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/gateway-deployment.yaml new file mode 100644 index 0000000..b14f86b --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/gateway-deployment.yaml @@ -0,0 +1,173 @@ +{{- if eq .Values.mode "gateway" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: {{ .Values.gateway.replicas }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . 
| sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + {{- if eq .Values.gateway.type "nas" }} + - "/usr/bin/docker-entrypoint.sh minio gateway nas {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }} " + {{- end }} + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client.crt" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client.key" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/ingress.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/ingress.yaml new file mode 100644 index 0000000..8d9a837 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "minio.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: {{ template "minio.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . 
| quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/networkpolicy.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/networkpolicy.yaml new file mode 100644 index 0000000..68a2599 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/networkpolicy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + ingress: + - ports: + - port: {{ .Values.service.port }} + - port: {{ .Values.consoleService.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "minio.name" . }}-client: "true" + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..8037eb7 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget") }} +kind: PodDisruptionBudget +metadata: + name: minio + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} +spec: + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + app: {{ template "minio.name" .
}} +{{- end }} \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml new file mode 100644 index 0000000..434b31d --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.buckets }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-bucket-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-bucket-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeBucketJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeBucketJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.makeBucketJob.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeBucketJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.tolerations }} + tolerations: +{{ toYaml .
| indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeBucketJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeBucketJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeBucketJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeBucketJob.resources | indent 10 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml new file mode 100644 index 0000000..ae78769 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.policies }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-policies-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-policies-job + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makePolicyJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.podAnnotations }} + annotations: +{{ toYaml .Values.makePolicyJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.makePolicyJob.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makePolicyJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makePolicyJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makePolicyJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makePolicyJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-policy"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" .
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makePolicyJob.resources | indent 10 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-user-job.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-user-job.yaml new file mode 100644 index 0000000..d3750e8 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-create-user-job.yaml @@ -0,0 +1,97 @@ +{{- $global := . -}} +{{- if .Values.users }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-user-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-user-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeUserJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeUserJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.makeUserJob.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeUserJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.tolerations }} + tolerations: +{{ toYaml .
| indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeUserJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeUserJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeUserJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- range .Values.users }} + {{- if .existingSecret }} + - secret: + name: {{ tpl .existingSecret $global }} + items: + - key: {{ .existingSecretKey }} + path: secrets/{{ tpl .accessKey $global }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-user"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeUserJob.resources | indent 10 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-custom-command.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-custom-command.yaml new file mode 100644 index 0000000..7e83faf --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/post-install-custom-command.yaml @@ -0,0 +1,87 @@ +{{- if .Values.customCommands }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . 
}}-custom-command-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-custom-command-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.customCommandJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.podAnnotations }} + annotations: +{{ toYaml .Values.customCommandJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.customCommandJob.nodeSelector }} + nodeSelector: +{{ toYaml .Values.customCommandJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.customCommandJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.customCommandJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.customCommandJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" .
}} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/custom-command"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.customCommandJob.resources | indent 10 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/pvc.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/pvc.yaml new file mode 100644 index 0000000..369aade --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/pvc.yaml @@ -0,0 +1,35 @@ +{{- if eq .Values.mode "standalone" }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.persistence.VolumeName }} + volumeName: "{{ .Values.persistence.VolumeName }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/secrets.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/secrets.yaml new file mode 100644 index 0000000..da2ecab --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/secrets.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "minio.secretName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + rootUser: {{ include "minio.root.username" . | b64enc | quote }} + rootPassword: {{ include "minio.root.password" . 
| b64enc | quote }} + {{- if .Values.etcd.clientCert }} + etcd_client.crt: {{ .Values.etcd.clientCert | toString | b64enc | quote }} + {{- end }} + {{- if .Values.etcd.clientCertKey }} + etcd_client.key: {{ .Values.etcd.clientCertKey | toString | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/securitycontextconstraints.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/securitycontextconstraints.yaml new file mode 100644 index 0000000..4bac7e3 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/securitycontextconstraints.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.securityContext.enabled .Values.persistence.enabled (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: [] +readOnlyRootFilesystem: false +defaultAddCapabilities: [] +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +fsGroup: + type: MustRunAs + ranges: + - max: {{ .Values.securityContext.fsGroup }} + min: {{ .Values.securityContext.fsGroup }} +runAsUser: + type: MustRunAs + uid: {{ .Values.securityContext.runAsUser }} +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/service.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/service.yaml new file mode 100644 index 0000000..64aa990 --- 
/dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/service.yaml @@ -0,0 +1,49 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + monitoring: "true" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} + type: ClusterIP + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP +{{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} +{{- else }} + targetPort: 9000 +{{- end}} +{{- if .Values.service.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.service.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . 
}} + release: {{ .Release.Name }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/serviceaccount.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/serviceaccount.yaml new file mode 100644 index 0000000..6a4bd94 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/serviceaccount.yaml @@ -0,0 +1,7 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/servicemonitor.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/servicemonitor.yaml new file mode 100644 index 0000000..809848f --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- if .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "minio.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{ else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- if .Values.tls.enabled }} + - port: https + scheme: https + {{ else }} + - port: http + scheme: http + {{- end }} + path: /minio/v2/metrics/cluster + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelConfigs }} +{{ toYaml .Values.metrics.serviceMonitor.relabelConfigs | indent 6 }} + {{- end }} + {{- if not .Values.metrics.serviceMonitor.public }} + bearerTokenSecret: + name: {{ template "minio.fullname" . }}-prometheus + key: token + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: + app: {{ include "minio.name" . 
}} + release: {{ .Release.Name }} + monitoring: "true" +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/templates/statefulset.yaml b/roles/cmoa_demo_install/files/01-storage/minio/templates/statefulset.yaml new file mode 100644 index 0000000..b4160f0 --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/templates/statefulset.yaml @@ -0,0 +1,217 @@ +{{- if eq .Values.mode "distributed" }} +{{ $poolCount := .Values.pools | int }} +{{ $nodeCount := .Values.replicas | int }} +{{ $drivesPerNode := .Values.drivesPerNode | int }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $mountPath := .Values.mountPath }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +{{ $subPath := .Values.persistence.subPath }} +{{ $penabled := .Values.persistence.enabled }} +{{ $accessMode := .Values.persistence.accessMode }} +{{ $storageClass := .Values.persistence.storageClass }} +{{ $psize := .Values.persistence.size }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-svc + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +--- +apiVersion: {{ template "minio.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + updateStrategy: + type: {{ .Values.StatefulSetUpdate.updateStrategy }} + podManagementPolicy: "Parallel" + serviceName: {{ template "minio.fullname" . }}-svc + replicas: {{ mul $poolCount $nodeCount }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + + command: [ "/bin/sh", + "-ce", + "/usr/bin/docker-entrypoint.sh minio server {{- range $i := until $poolCount }}{{ $factor := mul $i $nodeCount }}{{ $endIndex := add $factor $nodeCount }}{{ $beginIndex := mul $i $nodeCount }} {{ $scheme }}://{{ template `minio.fullname` $ }}-{{ `{` }}{{ $beginIndex }}...{{ sub $endIndex 1 }}{{ `}`}}.{{ template `minio.fullname` $ }}-svc.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{if (gt $drivesPerNode 1)}}{{ $bucketRoot }}-{{ `{` }}0...{{ sub $drivesPerNode 1 }}{{ `}` }}{{else}}{{ $bucketRoot }}{{end}}{{- end}} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template `minio.extraArgs` . 
}}" ] + volumeMounts: + {{- if $penabled }} + {{- if (gt $drivesPerNode 1) }} + {{- range $i := until $drivesPerNode }} + - name: export-{{ $i }} + mountPath: {{ $mountPath }}-{{ $i }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- else }} + - name: export + mountPath: {{ $mountPath }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode}} + - metadata: + name: export-{{ $diskId }} + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} + {{- else }} + - metadata: + name: export + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/01-storage/minio/values.yaml b/roles/cmoa_demo_install/files/01-storage/minio/values.yaml new file mode 100644 index 0000000..a957f7f --- /dev/null +++ b/roles/cmoa_demo_install/files/01-storage/minio/values.yaml @@ -0,0 +1,461 @@ +## Provide a name in place of minio for `app:` labels +## +nameOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## set kubernetes cluster domain where minio is running +## +clusterDomain: cluster.local + +## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the +## +image: + repository: 10.10.31.243:5000/cmoa3/minio + tag: RELEASE.2022-05-08T23-50-31Z + pullPolicy: IfNotPresent + +imagePullSecrets: + - name: "regcred" +# - name: "image-pull-secret" + +## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio +## client used to create a default bucket). 
+## +mcImage: + repository: 10.10.31.243:5000/cmoa3/mc + tag: RELEASE.2022-05-09T04-08-26Z + pullPolicy: IfNotPresent + +## minio mode, i.e. standalone or distributed or gateway. +mode: distributed ## other supported values are "standalone", "gateway" + +## Additional labels to include with deployment or statefulset +additionalLabels: [] + +## Additional annotations to include with deployment or statefulset +additionalAnnotations: [] + +## Typically the deployment/statefulset includes checksums of secrets/config, +## So that when these change on a subsequent helm install, the deployment/statefulset +## is restarted. This can result in unnecessary restarts under GitOps tooling such as +## flux, so set to "true" to disable this behaviour. +ignoreChartChecksums: false + +## Additional arguments to pass to minio binary +extraArgs: [] + +## Port number for MinIO S3 API Access +minioAPIPort: "9000" + +## Port number for MinIO Browser COnsole Access +minioConsolePort: "9001" + +## Update strategy for Deployments +DeploymentUpdate: + type: RollingUpdate + maxUnavailable: 0 + maxSurge: 100% + +## Update strategy for StatefulSets +StatefulSetUpdate: + updateStrategy: RollingUpdate + +## Pod priority settings +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Set default rootUser, rootPassword +## AccessKey and secretKey is generated when not set +## Distributed MinIO ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +rootUser: "admin" +rootPassword: "passW0rd" + +## Use existing Secret that store following variables: +## +## | Chart var | .data. in Secret | +## |:----------------------|:-------------------------| +## | rootUser | rootUser | +## | rootPassword | rootPassword | +## +## All mentioned variables will be ignored in values file. +## .data.rootUser and .data.rootPassword are mandatory, +## others depend on enabled status of corresponding sections. 
+existingSecret: "" + +## Directory on the MinIO pof +certsPath: "/etc/minio/certs/" +configPathmc: "/etc/minio/mc/" + +## Path where PV would be mounted on the MinIO Pod +mountPath: "/export" +## Override the root directory which the minio server should serve from. +## If left empty, it defaults to the value of {{ .Values.mountPath }} +## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }} +## +bucketRoot: "" + +# Number of drives attached to a node +drivesPerNode: 2 +# Number of MinIO containers running +#replicas: 16 +replicas: 2 +# Number of expanded MinIO clusters +pools: 1 + +# Deploy if 'mode == gateway' - 4 replicas. +gateway: + type: "nas" # currently only "nas" are supported. + replicas: 4 + +## TLS Settings for MinIO +tls: + enabled: false + ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret + certSecret: "" + publicCrt: public.crt + privateKey: private.key + +## Trusted Certificates Settings for MinIO. Ref: https://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls#install-certificates-from-third-party-cas +## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret +## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt. +## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. 
+trustedCertsSecret: "" + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + annotations: {} + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## minio data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + ## Storage class of PV to bind. By default it looks for standard storage class. + ## If the PV uses a different storage class, specify that here. + storageClass: "exem-local-storage" + VolumeName: "" + accessMode: ReadWriteOnce + size: 50Gi + + ## If subPath is set mount a sub folder of a volume instead of the root of the volume. + ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). + ## + subPath: "" + +## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## +#service: +# type: NodePort +# clusterIP: ~ + ## Make sure to match it to minioAPIPort +# port: "9000" +# nodePort: "32002" + +service: + type: ClusterIP + clusterIP: ~ + ## Make sure to match it to minioAPIPort + port: "9000" + +## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## + +ingress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +consoleService: + type: NodePort + clusterIP: ~ + ## Make sure to match it to minioConsolePort + port: "9001" + nodePort: "32001" + +consoleIngress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - console.minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +tolerations: [] +affinity: {} + +## Add stateful containers to have security context, if 
enabled MinIO will run as this +## user and group NOTE: securityContext is only enabled if persistence.enabled=true +securityContext: + enabled: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: "OnRootMismatch" + +# Additational pod annotations +podAnnotations: {} + +# Additional pod labels +podLabels: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + #memory: 16Gi + memory: 1Gi + cpu: 200m + +## List of policies to be created after minio install +## +## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics] +## you can define additional policies with custom supported actions and resources +policies: [] +## writeexamplepolicy policy grants creation or deletion of buckets with name +## starting with example. In addition, grants objects write permissions on buckets starting with +## example. +# - name: writeexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:AbortMultipartUpload" +# - "s3:GetObject" +# - "s3:DeleteObject" +# - "s3:PutObject" +# - "s3:ListMultipartUploadParts" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:CreateBucket" +# - "s3:DeleteBucket" +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## readonlyexamplepolicy policy grants access to buckets with name starting with example. +## In addition, grants objects read permissions on buckets starting with example. 
+# - name: readonlyexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:GetObject" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## Additional Annotations for the Kubernetes Job makePolicyJob +makePolicyJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of users to be created after minio install +## +users: + ## Username, password and policy to be assigned to the user + ## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics] + ## Add new policies as explained here https://docs.min.io/docs/minio-multi-user-quickstart-guide.html + ## NOTE: this will fail if LDAP is enabled in your MinIO deployment + ## make sure to disable this if you are using LDAP. 
+ - accessKey: cloudmoa + secretKey: admin1234 + policy: consoleAdmin + # Or you can refer to specific secret + #- accessKey: externalSecret + # existingSecret: my-secret + # existingSecretKey: password + # policy: readonly + + +## Additional Annotations for the Kubernetes Job makeUserJob +makeUserJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of buckets to be created after minio install +## +buckets: + - name: cortex-bucket + policy: none + purge: false + versioning: false + + # # Name of the bucket + # - name: bucket1 + # # Policy to be set on the + # # bucket [none|download|upload|public] + # policy: none + # # Purge if bucket exists already + # purge: false + # # set versioning for + # # bucket [true|false] + # versioning: false + # - name: bucket2 + # policy: none + # purge: false + # versioning: true + +## Additional Annotations for the Kubernetes Job makeBucketJob +makeBucketJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of command to run after minio install +## NOTE: the mc command TARGET is always "myminio" +customCommands: + # - command: "admin policy set myminio consoleAdmin group='cn=ops,cn=groups,dc=example,dc=com'" + +## Additional Annotations for the Kubernetes Job customCommandJob +customCommandJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## Use this field to add environment variables relevant to MinIO server. 
These fields will be passed on to MinIO container(s) +## when Chart is deployed +environment: + ## Please refer for comprehensive list https://docs.min.io/minio/baremetal/reference/minio-server/minio-server.html + ## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io" + ## MINIO_BROWSER: "off" + +## The name of a secret in the same kubernetes namespace which contain secret values +## This can be useful for LDAP password, etc +## The key in the secret must be 'config.env' +## +# extraSecret: minio-extraenv + +networkPolicy: + enabled: false + allowExternal: true + +## PodDisruptionBudget settings +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +## Specify the service account to use for the MinIO pods. If 'create' is set to 'false' +## and 'name' is left unspecified, the account 'default' will be used. +serviceAccount: + create: true + ## The name of the service account to use. If 'create' is 'true', a service account with that name + ## will be created. + name: "minio-sa" + +metrics: + serviceMonitor: + enabled: false + public: true + additionalLabels: {} + relabelConfigs: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + +## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md +## Define endpoints to enable this section. 
+etcd: + endpoints: [] + pathPrefix: "" + corednsPathPrefix: "" + clientCert: "" + clientCertKey: "" diff --git a/roles/cmoa_demo_install/files/02-base/00-kafka-broker-config.yaml b/roles/cmoa_demo_install/files/02-base/00-kafka-broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/00-kafka-broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? -ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. 
cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# 
Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + 
log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the 
two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/roles/cmoa_demo_install/files/02-base/01-coredns.yaml b/roles/cmoa_demo_install/files/02-base/01-coredns.yaml new file mode 100644 index 0000000..c1cb74b --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/01-coredns.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-dns + kubernetes.io/name: coredns + name: coredns + namespace: kube-system +spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + protocol: TCP + targetPort: 53 + - 
name: metrics + port: 9153 + protocol: TCP + targetPort: 9153 + selector: + k8s-app: kube-dns + sessionAffinity: None + type: ClusterIP + diff --git a/roles/cmoa_demo_install/files/02-base/base/.helmignore b/roles/cmoa_demo_install/files/02-base/base/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_demo_install/files/02-base/base/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/Chart.yaml new file mode 100644 index 0000000..74d1d30 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: base +version: 0.1.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/analysis/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/analysis/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/Chart.yaml new file mode 100644 index 0000000..74b9505 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: analysis +version: 0.1.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml new file mode 100644 index 0000000..21a9298 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml @@ -0,0 +1,87 @@ +#docker run -d --hostname my-rabbit --name some-rabbit -p 8080:15672 -p 5672:5672 rabbitmq:3-management + +--- +kind: Service +apiVersion: v1 +metadata: + name: metric-analyzer-master + namespace: imxc +spec: +# clusterIP: None # We need a headless service to allow the pods to discover each + ports: # other during autodiscover phase for cluster creation. + - name: http # A ClusterIP will prevent resolving dns requests for other pods + protocol: TCP # under the same service. 
+ port: 15672 + targetPort: 15672 +# nodePort: 30001 + - name: amqp + protocol: TCP + port: 5672 + targetPort: 5672 +# nodePort: 30002 + selector: + app: metric-analyzer-master +# type: NodePort +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metric-analyzer-master + name: metric-analyzer-master + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: metric-analyzer-master + template: + metadata: + labels: + app: metric-analyzer-master + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer:{{ .Values.global.METRIC_ANALYZER_MASTER_VERSION }} + imagePullPolicy: IfNotPresent + name: master +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: POSTGRES_SERVER + value: postgres + - name: POSTGRES_USER + value: admin + - name: POSTGRES_PW + value: eorbahrhkswp + - name: POSTGRES_DB + value: postgresdb + - name: PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: POSTGRES_PORT + value: "5432" + - name: ES_SERVER + value: elasticsearch + - name: ES_PORT + value: "9200" + - name: ES_ID + value: "elastic" + - name: ES_PWD + value: "elastic" + - name: LOG_LEVEL + value: INFO + - name: AI_TYPE + value: BASELINE + - name: BASELINE_SIZE + value: "3" + - name: CHECK_DAY + value: "2" + resources: + requests: + memory: "100Mi" diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml new file mode 100644 index 0000000..7e6eaea --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: 
metric-analyzer-worker + name: metric-analyzer-worker + namespace: imxc +spec: + replicas: 10 + selector: + matchLabels: + app: metric-analyzer-worker + template: + metadata: + labels: + app: metric-analyzer-worker + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer_worker:{{ .Values.global.METRIC_ANALYZER_WORKER_VERSION }} + imagePullPolicy: IfNotPresent + name: worker +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" +# volumes: +# - hostPath: +# path: /usr/share/zoneinfo/Asia/Seoul +# name: timezone-config + resources: + requests: + memory: "100Mi" diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/analysis/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/values.yaml new file mode 100644 index 0000000..d764210 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/analysis/values.yaml @@ -0,0 +1,68 @@ +# Default values for analysis. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/.helmignore new file mode 100644 index 0000000..db3418b --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/.helmignore @@ -0,0 +1,29 @@ +# Git +.git/ +.gitignore +.github/ + +# IDE +.project +.idea/ +*.tmproj + +# Common backup files +*.swp +*.bak +*.tmp +*~ + +# Cortex ignore +docs/ +tools/ +ct.yaml +ci/ +README.md.gotmpl +.prettierignore +CHANGELOG.md +MAINTAINERS.md +LICENSE +Makefile +renovate.json + diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.lock b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.lock new file mode 100644 index 0000000..f909218 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.lock @@ -0,0 +1,24 @@ +dependencies: +- name: 
memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +digest: sha256:a6b7c1239f9cabc85dd647798a6f92ae8a9486756ab1e87fc11af2180ab03ee4 +generated: "2021-12-25T19:21:57.666697218Z" diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.yaml new file mode 100644 index 0000000..9122fe6 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/Chart.yaml @@ -0,0 +1,56 @@ +apiVersion: v2 +appVersion: v1.11.0 +dependencies: +- alias: memcached + condition: memcached.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-read + condition: memcached-index-read.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-write + condition: memcached-index-write.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-frontend + condition: memcached-frontend.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-blocks-index + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +- alias: memcached-blocks + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +- 
alias: memcached-blocks-metadata + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +description: Horizontally scalable, highly available, multi-tenant, long term Prometheus. +home: https://cortexmetrics.io/ +icon: https://avatars2.githubusercontent.com/u/43045022?s=200&v=4 +kubeVersion: ^1.19.0-0 +maintainers: +- email: thayward@infoblox.com + name: Tom Hayward + url: https://github.com/kd7lxl +- email: Niclas.Schad@plusserver.com + name: Niclas Schad + url: https://github.com/ShuzZzle +name: cortex +sources: +- https://github.com/cortexproject/cortex-helm-chart +version: 1.2.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/README.md b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/README.md new file mode 100644 index 0000000..9a793d3 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/README.md @@ -0,0 +1,754 @@ + + +# cortex + +![Version: 1.2.0](https://img.shields.io/badge/Version-1.2.0-informational?style=flat-square) ![AppVersion: v1.11.0](https://img.shields.io/badge/AppVersion-v1.11.0-informational?style=flat-square) + +Horizontally scalable, highly available, multi-tenant, long term Prometheus. + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Tom Hayward | thayward@infoblox.com | https://github.com/kd7lxl | +| Niclas Schad | Niclas.Schad@plusserver.com | https://github.com/ShuzZzle | + +## Documentation + +Checkout our documentation for the cortex-helm-chart [here](https://cortexproject.github.io/cortex-helm-chart/) + +## Dependencies + +### Key-Value store + +Cortex requires a Key-Value (KV) store to store the ring. It can use traditional KV stores like [Consul](https://www.consul.io/) or [etcd](https://etcd.io/), but it can also build its own KV store on top of memberlist library using a gossip algorithm. 
+ +The recommended approach is to use the built-in memberlist as a KV store, where supported. + +External KV stores can be installed alongside Cortex using their respective helm charts https://github.com/bitnami/charts/tree/master/bitnami/etcd and https://github.com/helm/charts/tree/master/stable/consul. + +### Storage + +Cortex requires a storage backend to store metrics and indexes. +See [cortex documentation](https://cortexmetrics.io/docs/) for details on storage types and documentation + +## Installation + +[Helm](https://helm.sh) must be installed to use the charts. +Please refer to Helm's [documentation](https://helm.sh/docs/) to get started. + +Once Helm is set up properly, add the repo as follows: + +```bash + helm repo add cortex-helm https://cortexproject.github.io/cortex-helm-chart +``` + +Cortex can now be installed with the following command: + +```bash + helm install cortex --namespace cortex cortex-helm/cortex +``` + +If you have custom options or values you want to override: + +```bash + helm install cortex --namespace cortex -f my-cortex-values.yaml cortex-helm/cortex +``` + +Specific versions of the chart can be installed using the `--version` option, with the default being the latest release. +What versions are available for installation can be listed with the following command: + +```bash + helm search repo cortex-helm +``` + +As part of this chart many different pods and services are installed which all +have varying resource requirements. Please make sure that you have sufficient +resources (CPU/memory) available in your cluster before installing Cortex Helm +chart. + +## Upgrades + +To upgrade Cortex use the following command: + +```bash + helm upgrade cortex -f my-cortex-values.yaml cortex-helm/cortex +``` +Note that it might be necessary to use `--reset-values` since some default values in the values.yaml might have changed or were removed. 
+ +Source code can be found [here](https://cortexmetrics.io/) + +## Requirements + +Kubernetes: `^1.19.0-0` + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | memcached(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-read(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-write(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-frontend(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-index(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-metadata(memcached) | 5.15.12 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| alertmanager.​affinity | object | `{}` | | +| alertmanager.​annotations | object | `{}` | | +| alertmanager.​containerSecurityContext.​enabled | bool | `true` | | +| alertmanager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| alertmanager.​enabled | bool | `true` | | +| alertmanager.​env | list | `[]` | Extra env variables to pass to the cortex container | +| alertmanager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log level (debug, info, warn, error) | +| alertmanager.​extraContainers | list | `[]` | Additional containers to be added to the cortex pod. | +| alertmanager.​extraPorts | list | `[]` | Additional ports to the cortex services. Useful to expose extra container ports. | +| alertmanager.​extraVolumeMounts | list | `[]` | Extra volume mounts that will be added to the cortex container | +| alertmanager.​extraVolumes | list | `[]` | Additional volumes to the cortex pod. | +| alertmanager.​initContainers | list | `[]` | Init containers to be added to the cortex pod. 
| +| alertmanager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​nodeSelector | object | `{}` | | +| alertmanager.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Alertmanager data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| alertmanager.​persistentVolume.​annotations | object | `{}` | Alertmanager data Persistent Volume Claim annotations | +| alertmanager.​persistentVolume.​enabled | bool | `true` | If true and alertmanager.statefulSet.enabled is true, Alertmanager will create/use a Persistent Volume Claim If false, use emptyDir | +| alertmanager.​persistentVolume.​size | string | `"2Gi"` | Alertmanager data Persistent Volume size | +| alertmanager.​persistentVolume.​storageClass | string | `nil` | Alertmanager data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| alertmanager.​persistentVolume.​subPath | string | `""` | Subdirectory of Alertmanager data Persistent Volume to mount Useful if the volume's root directory is not empty | +| alertmanager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| alertmanager.​podDisruptionBudget | object | `{"maxUnavailable":1}` | If not set then a PodDisruptionBudget will not be created | +| alertmanager.​podLabels | object | `{}` | Pod Labels | +| alertmanager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​replicas | int | `1` | | +| alertmanager.​resources | object | `{}` | | +| alertmanager.​securityContext | object | `{}` | | +| alertmanager.​service.​annotations | object | `{}` | | +| alertmanager.​service.​labels | object | `{}` | | +| alertmanager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| alertmanager.​serviceMonitor.​additionalLabels | object | `{}` | | +| alertmanager.​serviceMonitor.​enabled | bool | `false` | | +| alertmanager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| alertmanager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| alertmanager.​serviceMonitor.​relabelings | list | `[]` | | +| alertmanager.​sidecar | object | `{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/data","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_alertmanager","labelValue":null,"resources":{},"searchNamespace":null,"skipTlsVerify":false,"watchMethod":null}` | Sidecars 
that collect the configmaps with specified label and stores the included files them into the respective folders | +| alertmanager.​sidecar.​skipTlsVerify | bool | `false` | skipTlsVerify Set to true to skip tls verification for kube api calls | +| alertmanager.​startupProbe.​failureThreshold | int | `10` | | +| alertmanager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. This is useful for using a persistent volume for storing silences between restarts. | +| alertmanager.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| alertmanager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| alertmanager.​strategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​terminationGracePeriodSeconds | int | `60` | | +| alertmanager.​tolerations | list | `[]` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| clusterDomain | string | `"cluster.local"` | Kubernetes cluster DNS domain | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"compactor"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| 
compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| compactor.​annotations | object | `{}` | | +| compactor.​containerSecurityContext.​enabled | bool | `true` | | +| compactor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| compactor.​enabled | bool | `true` | | +| compactor.​env | list | `[]` | | +| compactor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| compactor.​extraContainers | list | `[]` | | +| compactor.​extraPorts | list | `[]` | | +| compactor.​extraVolumeMounts | list | `[]` | | +| compactor.​extraVolumes | list | `[]` | | +| compactor.​initContainers | list | `[]` | | +| compactor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​nodeSelector | object | `{}` | | +| compactor.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | compactor data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| compactor.​persistentVolume.​annotations | object | `{}` | compactor data Persistent Volume Claim annotations | +| compactor.​persistentVolume.​enabled | bool | `true` | If true compactor will create/use a Persistent Volume Claim If false, use emptyDir | +| compactor.​persistentVolume.​size | string | `"2Gi"` | | +| compactor.​persistentVolume.​storageClass | string | `nil` | compactor data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| compactor.​persistentVolume.​subPath | string | `""` | Subdirectory of compactor data Persistent Volume to mount Useful if the volume's root directory is not empty | +| compactor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| compactor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| compactor.​podLabels | object | `{}` | Pod Labels | +| compactor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​replicas | int | `1` | | +| compactor.​resources | object | `{}` | | +| compactor.​securityContext | object | `{}` | | +| compactor.​service.​annotations | object | `{}` | | +| compactor.​service.​labels | object | `{}` | | +| compactor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| compactor.​serviceMonitor.​additionalLabels | object | `{}` | | +| compactor.​serviceMonitor.​enabled | bool | `false` | | +| compactor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| compactor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| compactor.​serviceMonitor.​relabelings | list | `[]` | | +| compactor.​startupProbe.​failureThreshold | int | `60` | | +| compactor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​startupProbe.​initialDelaySeconds | int | `120` | | +| compactor.​startupProbe.​periodSeconds | int | `30` | | +| compactor.​strategy.​type | string | `"RollingUpdate"` | | +| compactor.​terminationGracePeriodSeconds | int | `240` | | +| compactor.​tolerations | list | `[]` | | +| 
config.​alertmanager.​enable_api | bool | `false` | Enable the experimental alertmanager config api. | +| config.​alertmanager.​external_url | string | `"/api/prom/alertmanager"` | | +| config.​alertmanager.​storage | object | `{}` | Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config | +| config.​api.​prometheus_http_prefix | string | `"/prometheus"` | | +| config.​api.​response_compression_enabled | bool | `true` | Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression. | +| config.​auth_enabled | bool | `false` | | +| config.​blocks_storage.​bucket_store.​bucket_index.​enabled | bool | `true` | | +| config.​blocks_storage.​bucket_store.​sync_dir | string | `"/data/tsdb-sync"` | | +| config.​blocks_storage.​tsdb.​dir | string | `"/data/tsdb"` | | +| config.​distributor.​pool.​health_check_ingesters | bool | `true` | | +| config.​distributor.​shard_by_all_labels | bool | `true` | Distribute samples based on all labels, as opposed to solely by user and metric name. | +| config.​frontend.​log_queries_longer_than | string | `"10s"` | | +| config.​ingester.​lifecycler.​final_sleep | string | `"30s"` | Duration to sleep for before exiting, to ensure metrics are scraped. | +| config.​ingester.​lifecycler.​join_after | string | `"10s"` | We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. It can take a while to have the full picture when using gossip | +| config.​ingester.​lifecycler.​num_tokens | int | `512` | | +| config.​ingester.​lifecycler.​observe_period | string | `"10s"` | To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, after putting their own tokens into it. 
This is only useful when using gossip, since multiple ingesters joining at the same time can have conflicting tokens if they don't see each other yet. | +| config.​ingester.​lifecycler.​ring.​kvstore.​store | string | `"memberlist"` | | +| config.​ingester.​lifecycler.​ring.​replication_factor | int | `3` | Ingester replication factor per default is 3 | +| config.​ingester_client.​grpc_client_config.​max_recv_msg_size | int | `10485760` | | +| config.​ingester_client.​grpc_client_config.​max_send_msg_size | int | `10485760` | | +| config.​limits.​enforce_metric_name | bool | `true` | Enforce that every sample has a metric name | +| config.​limits.​max_query_lookback | string | `"0s"` | | +| config.​limits.​reject_old_samples | bool | `true` | | +| config.​limits.​reject_old_samples_max_age | string | `"168h"` | | +| config.​memberlist.​bind_port | int | `7946` | | +| config.​memberlist.​join_members | list | `["{{ include \"cortex.fullname\" $ }}-memberlist"]` | the service name of the memberlist if using memberlist discovery | +| config.​querier.​active_query_tracker_dir | string | `"/data/active-query-tracker"` | | +| config.​querier.​query_ingesters_within | string | `"13h"` | Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester. Ingesters by default have no data older than 12 hours, so we can safely set this 13 hours | +| config.​querier.​query_store_after | string | `"12h"` | The time after which a metric should be queried from storage and not just ingesters. | +| config.​querier.​store_gateway_addresses | string | automatic | Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should is set automatically when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring). 
| +| config.​query_range.​align_queries_with_step | bool | `true` | | +| config.​query_range.​cache_results | bool | `true` | | +| config.​query_range.​results_cache.​cache.​memcached.​expiration | string | `"1h"` | | +| config.​query_range.​results_cache.​cache.​memcached_client.​timeout | string | `"1s"` | | +| config.​query_range.​split_queries_by_interval | string | `"24h"` | | +| config.​ruler.​enable_alertmanager_discovery | bool | `false` | | +| config.​ruler.​enable_api | bool | `true` | Enable the experimental ruler config api. | +| config.​ruler.​storage | object | `{}` | Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config | +| config.​runtime_config.​file | string | `"/etc/cortex-runtime-config/runtime_config.yaml"` | | +| config.​server.​grpc_listen_port | int | `9095` | | +| config.​server.​grpc_server_max_concurrent_streams | int | `10000` | | +| config.​server.​grpc_server_max_recv_msg_size | int | `10485760` | | +| config.​server.​grpc_server_max_send_msg_size | int | `10485760` | | +| config.​server.​http_listen_port | int | `8080` | | +| config.​storage | object | `{"engine":"blocks","index_queries_cache_config":{"memcached":{"expiration":"1h"},"memcached_client":{"timeout":"1s"}}}` | See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config | +| config.​storage.​index_queries_cache_config.​memcached.​expiration | string | `"1h"` | How long keys stay in the memcache | +| config.​storage.​index_queries_cache_config.​memcached_client.​timeout | string | `"1s"` | Maximum time to wait before giving up on memcached requests. 
| +| config.​store_gateway | object | `{"sharding_enabled":false}` | https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config | +| configs.​affinity | object | `{}` | | +| configs.​annotations | object | `{}` | | +| configs.​containerSecurityContext.​enabled | bool | `true` | | +| configs.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| configs.​enabled | bool | `false` | | +| configs.​env | list | `[]` | | +| configs.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| configs.​extraContainers | list | `[]` | | +| configs.​extraPorts | list | `[]` | | +| configs.​extraVolumeMounts | list | `[]` | | +| configs.​extraVolumes | list | `[]` | | +| configs.​initContainers | list | `[]` | | +| configs.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​nodeSelector | object | `{}` | | +| configs.​persistentVolume.​subPath | string | `nil` | | +| configs.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| configs.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| configs.​podLabels | object | `{}` | Pod Labels | +| configs.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​replicas | int | `1` | | +| configs.​resources | object | `{}` | | +| configs.​securityContext | object | `{}` | | +| configs.​service.​annotations | object | `{}` | | +| configs.​service.​labels | object | `{}` | | +| configs.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| configs.​serviceMonitor.​additionalLabels | object | `{}` | | +| configs.​serviceMonitor.​enabled | bool | `false` | | +| configs.​serviceMonitor.​extraEndpointSpec | object | `{}` | 
Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| configs.​serviceMonitor.​metricRelabelings | list | `[]` | | +| configs.​serviceMonitor.​relabelings | list | `[]` | | +| configs.​startupProbe.​failureThreshold | int | `10` | | +| configs.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| configs.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| configs.​strategy.​type | string | `"RollingUpdate"` | | +| configs.​terminationGracePeriodSeconds | int | `180` | | +| configs.​tolerations | list | `[]` | | +| configsdb_postgresql.​auth.​existing_secret.​key | string | `nil` | | +| configsdb_postgresql.​auth.​existing_secret.​name | string | `nil` | | +| configsdb_postgresql.​auth.​password | string | `nil` | | +| configsdb_postgresql.​enabled | bool | `false` | | +| configsdb_postgresql.​uri | string | `nil` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"distributor"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| distributor.​annotations | object | `{}` | | +| distributor.​autoscaling.​behavior 
| object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| distributor.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the distributor pods. | +| distributor.​autoscaling.​maxReplicas | int | `30` | | +| distributor.​autoscaling.​minReplicas | int | `2` | | +| distributor.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| distributor.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| distributor.​containerSecurityContext.​enabled | bool | `true` | | +| distributor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| distributor.​env | list | `[]` | | +| distributor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| distributor.​extraContainers | list | `[]` | | +| distributor.​extraPorts | list | `[]` | | +| distributor.​extraVolumeMounts | list | `[]` | | +| distributor.​extraVolumes | list | `[]` | | +| distributor.​initContainers | list | `[]` | | +| distributor.​lifecycle | object | `{}` | | +| distributor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​nodeSelector | object | `{}` | | +| distributor.​persistentVolume.​subPath | string | `nil` | | +| distributor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| distributor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| distributor.​podLabels | object | `{}` | Pod Labels | +| distributor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​replicas | int | `2` | | +| distributor.​resources | object | `{}` | | +| distributor.​securityContext | object | `{}` | | +| distributor.​service.​annotations | object | `{}` | | +| 
distributor.​service.​labels | object | `{}` | | +| distributor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| distributor.​serviceMonitor.​additionalLabels | object | `{}` | | +| distributor.​serviceMonitor.​enabled | bool | `false` | | +| distributor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| distributor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| distributor.​serviceMonitor.​relabelings | list | `[]` | | +| distributor.​startupProbe.​failureThreshold | int | `10` | | +| distributor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| distributor.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| distributor.​strategy.​type | string | `"RollingUpdate"` | | +| distributor.​terminationGracePeriodSeconds | int | `60` | | +| distributor.​tolerations | list | `[]` | | +| externalConfigSecretName | string | `"secret-with-config.yaml"` | | +| externalConfigVersion | string | `"0"` | | +| image.​pullPolicy | string | `"IfNotPresent"` | | +| image.​pullSecrets | list | `[]` | Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace. ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| image.​repository | string | `"quay.io/cortexproject/cortex"` | | +| image.​tag | string | `""` | Allows you to override the cortex version in this chart. Use at your own risk. 
| +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"ingester"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| ingester.​annotations | object | `{}` | | +| ingester.​autoscaling.​behavior.​scaleDown.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details | +| ingester.​autoscaling.​behavior.​scaleDown.​stabilizationWindowSeconds | int | `3600` | uses metrics from the past 1h to make scaleDown decisions | +| ingester.​autoscaling.​behavior.​scaleUp.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | This default scaleup policy allows adding 1 pod every 30 minutes. 
Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| ingester.​autoscaling.​enabled | bool | `false` | | +| ingester.​autoscaling.​maxReplicas | int | `30` | | +| ingester.​autoscaling.​minReplicas | int | `3` | | +| ingester.​autoscaling.​targetMemoryUtilizationPercentage | int | `80` | | +| ingester.​containerSecurityContext.​enabled | bool | `true` | | +| ingester.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ingester.​env | list | `[]` | | +| ingester.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| ingester.​extraContainers | list | `[]` | | +| ingester.​extraPorts | list | `[]` | | +| ingester.​extraVolumeMounts | list | `[]` | | +| ingester.​extraVolumes | list | `[]` | | +| ingester.​initContainers | list | `[]` | | +| ingester.​lifecycle.​preStop | object | `{"httpGet":{"path":"/ingester/shutdown","port":"http-metrics"}}` | The /shutdown preStop hook is recommended as part of the ingester scaledown process, but can be removed to optimize rolling restarts in instances that will never be scaled down or when using chunks storage with WAL disabled. https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down | +| ingester.​livenessProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. 
Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​nodeSelector | object | `{}` | | +| ingester.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Ingester data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| ingester.​persistentVolume.​annotations | object | `{}` | Ingester data Persistent Volume Claim annotations | +| ingester.​persistentVolume.​enabled | bool | `true` | If true and ingester.statefulSet.enabled is true, Ingester will create/use a Persistent Volume Claim If false, use emptyDir | +| ingester.​persistentVolume.​size | string | `"2Gi"` | Ingester data Persistent Volume size | +| ingester.​persistentVolume.​storageClass | string | `nil` | Ingester data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| ingester.​persistentVolume.​subPath | string | `""` | Subdirectory of Ingester data Persistent Volume to mount Useful if the volume's root directory is not empty | +| ingester.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ingester.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ingester.​podLabels | object | `{}` | Pod Labels | +| ingester.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ingester.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ingester.​replicas | int | `3` | | +| ingester.​resources | object | `{}` | | +| ingester.​securityContext | object | `{}` | | +| ingester.​service.​annotations | object | `{}` | | +| ingester.​service.​labels | object | `{}` | | +| ingester.​serviceAccount.​name | string | `nil` | | +| ingester.​serviceMonitor.​additionalLabels | object | `{}` | | +| ingester.​serviceMonitor.​enabled | bool | `false` | | +| ingester.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ingester.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ingester.​serviceMonitor.​relabelings | list | `[]` | | +| ingester.​startupProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. 
This is useful when using WAL | +| ingester.​statefulSet.​podManagementPolicy | string | `"OrderedReady"` | ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details | +| ingester.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| ingester.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ingester.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ingester.​strategy.​type | string | `"RollingUpdate"` | | +| ingester.​terminationGracePeriodSeconds | int | `240` | | +| ingester.​tolerations | list | `[]` | | +| ingress.​annotations | object | `{}` | | +| ingress.​enabled | bool | `false` | | +| ingress.​hosts[0].​host | string | `"chart-example.local"` | | +| ingress.​hosts[0].​paths[0] | string | `"/"` | | +| ingress.​ingressClass.​enabled | bool | `false` | | +| ingress.​ingressClass.​name | string | `"nginx"` | | +| ingress.​tls | list | `[]` | | +| memcached | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | chunk caching for legacy chunk storage engine | +| memcached-blocks-index.​architecture | string | `"high-availability"` | | +| memcached-blocks-index.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-index.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-index.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is 
the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached-blocks-index.​metrics.​enabled | bool | `true` | | +| memcached-blocks-index.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-index.​replicaCount | int | `2` | | +| memcached-blocks-index.​resources | object | `{}` | | +| memcached-blocks-metadata.​architecture | string | `"high-availability"` | | +| memcached-blocks-metadata.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-metadata.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-metadata.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks-metadata.​metrics.​enabled | bool | `true` | | +| memcached-blocks-metadata.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-metadata.​replicaCount | int | `2` | | +| memcached-blocks-metadata.​resources | object | `{}` | | +| memcached-blocks.​architecture | string | `"high-availability"` | | +| memcached-blocks.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks.​metrics.​enabled | bool | `true` | | +| memcached-blocks.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks.​replicaCount | int | `2` | | +| memcached-blocks.​resources | object | `{}` | | +| memcached-frontend.​architecture | string | `"high-availability"` | | +| memcached-frontend.​enabled | bool | `false` | | +| memcached-frontend.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-frontend.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-frontend.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-frontend.​metrics.​enabled | bool | `true` | | +| memcached-frontend.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-frontend.​replicaCount | int | `2` | | +| memcached-frontend.​resources | object | `{}` | | +| memcached-index-read | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index read caching for legacy chunk storage engine | +| memcached-index-read.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-read.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-read.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-index-write | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index write caching for legacy chunk storage engine | +| memcached-index-write.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-write.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-write.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| nginx.​affinity | object | `{}` | | +| nginx.​annotations | object | `{}` | | +| nginx.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| nginx.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the nginx pods. | +| nginx.​autoscaling.​maxReplicas | int | `30` | | +| nginx.​autoscaling.​minReplicas | int | `2` | | +| nginx.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| nginx.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| nginx.​config.​auth_orgs | list | `[]` | (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config | +| nginx.​config.​basicAuthSecretName | string | `""` | (optional) Name of basic auth secret. In order to use this option, a secret with htpasswd formatted contents at the key ".htpasswd" must exist. For example: apiVersion: v1 kind: Secret metadata: name: my-secret namespace: stringData: .htpasswd: | user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ Please note that the use of basic auth will not identify organizations the way X-Scope-OrgID does. Thus, the use of basic auth alone will not prevent one tenant from viewing the metrics of another. To ensure tenants are scoped appropriately, explicitly set the `X-Scope-OrgID` header in the nginx config. 
Example setHeaders: X-Scope-OrgID: $remote_user | +| nginx.​config.​client_max_body_size | string | `"1M"` | ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size | +| nginx.​config.​dnsResolver | string | `"coredns.kube-system.svc.cluster.local"` | | +| nginx.​config.​httpSnippet | string | `""` | arbitrary snippet to inject in the http { } section of the nginx config | +| nginx.​config.​mainSnippet | string | `""` | arbitrary snippet to inject in the top section of the nginx config | +| nginx.​config.​serverSnippet | string | `""` | arbitrary snippet to inject in the server { } section of the nginx config | +| nginx.​config.​setHeaders | object | `{}` | | +| nginx.​containerSecurityContext.​enabled | bool | `true` | | +| nginx.​containerSecurityContext.​readOnlyRootFilesystem | bool | `false` | | +| nginx.​enabled | bool | `true` | | +| nginx.​env | list | `[]` | | +| nginx.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| nginx.​extraContainers | list | `[]` | | +| nginx.​extraPorts | list | `[]` | | +| nginx.​extraVolumeMounts | list | `[]` | | +| nginx.​extraVolumes | list | `[]` | | +| nginx.​http_listen_port | int | `80` | | +| nginx.​image.​pullPolicy | string | `"IfNotPresent"` | | +| nginx.​image.​repository | string | `"nginx"` | | +| nginx.​image.​tag | float | `1.21` | | +| nginx.​initContainers | list | `[]` | | +| nginx.​livenessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​nodeSelector | object | `{}` | | +| nginx.​persistentVolume.​subPath | string | `nil` | | +| nginx.​podAnnotations | object | `{}` | Pod Annotations | +| nginx.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| nginx.​podLabels | object | `{}` | Pod Labels | +| nginx.​readinessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| 
nginx.​replicas | int | `2` | | +| nginx.​resources | object | `{}` | | +| nginx.​securityContext | object | `{}` | | +| nginx.​service.​annotations | object | `{}` | | +| nginx.​service.​labels | object | `{}` | | +| nginx.​service.​type | string | `"ClusterIP"` | | +| nginx.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| nginx.​startupProbe.​failureThreshold | int | `10` | | +| nginx.​startupProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| nginx.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| nginx.​strategy.​type | string | `"RollingUpdate"` | | +| nginx.​terminationGracePeriodSeconds | int | `10` | | +| nginx.​tolerations | list | `[]` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"querier"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| querier.​annotations | object | `{}` | | +| querier.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| querier.​autoscaling.​enabled | bool | `false` | Creates 
a HorizontalPodAutoscaler for the querier pods. | +| querier.​autoscaling.​maxReplicas | int | `30` | | +| querier.​autoscaling.​minReplicas | int | `2` | | +| querier.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| querier.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| querier.​containerSecurityContext.​enabled | bool | `true` | | +| querier.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| querier.​env | list | `[]` | | +| querier.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| querier.​extraContainers | list | `[]` | | +| querier.​extraPorts | list | `[]` | | +| querier.​extraVolumeMounts | list | `[]` | | +| querier.​extraVolumes | list | `[]` | | +| querier.​initContainers | list | `[]` | | +| querier.​lifecycle | object | `{}` | | +| querier.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​nodeSelector | object | `{}` | | +| querier.​persistentVolume.​subPath | string | `nil` | | +| querier.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| querier.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| querier.​podLabels | object | `{}` | Pod Labels | +| querier.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​replicas | int | `2` | | +| querier.​resources | object | `{}` | | +| querier.​securityContext | object | `{}` | | +| querier.​service.​annotations | object | `{}` | | +| querier.​service.​labels | object | `{}` | | +| querier.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| querier.​serviceMonitor.​additionalLabels | object | `{}` | | +| querier.​serviceMonitor.​enabled | bool | `false` | | +| 
querier.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| querier.​serviceMonitor.​metricRelabelings | list | `[]` | | +| querier.​serviceMonitor.​relabelings | list | `[]` | | +| querier.​startupProbe.​failureThreshold | int | `10` | | +| querier.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| querier.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| querier.​strategy.​type | string | `"RollingUpdate"` | | +| querier.​terminationGracePeriodSeconds | int | `180` | | +| querier.​tolerations | list | `[]` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"query-frontend"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| query_frontend.​annotations | object | `{}` | | +| query_frontend.​containerSecurityContext.​enabled | bool | `true` | | +| query_frontend.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| query_frontend.​env | list | `[]` | | +| query_frontend.​extraArgs | object | `{}` | Additional Cortex 
container arguments, e.g. log.level (debug, info, warn, error) | +| query_frontend.​extraContainers | list | `[]` | | +| query_frontend.​extraPorts | list | `[]` | | +| query_frontend.​extraVolumeMounts | list | `[]` | | +| query_frontend.​extraVolumes | list | `[]` | | +| query_frontend.​initContainers | list | `[]` | | +| query_frontend.​lifecycle | object | `{}` | | +| query_frontend.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​nodeSelector | object | `{}` | | +| query_frontend.​persistentVolume.​subPath | string | `nil` | | +| query_frontend.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| query_frontend.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| query_frontend.​podLabels | object | `{}` | Pod Labels | +| query_frontend.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​replicas | int | `2` | | +| query_frontend.​resources | object | `{}` | | +| query_frontend.​securityContext | object | `{}` | | +| query_frontend.​service.​annotations | object | `{}` | | +| query_frontend.​service.​labels | object | `{}` | | +| query_frontend.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| query_frontend.​serviceMonitor.​additionalLabels | object | `{}` | | +| query_frontend.​serviceMonitor.​enabled | bool | `false` | | +| query_frontend.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| query_frontend.​serviceMonitor.​metricRelabelings | list | `[]` | | +| query_frontend.​serviceMonitor.​relabelings | list | `[]` | | +| 
query_frontend.​startupProbe.​failureThreshold | int | `10` | | +| query_frontend.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| query_frontend.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| query_frontend.​strategy.​type | string | `"RollingUpdate"` | | +| query_frontend.​terminationGracePeriodSeconds | int | `180` | | +| query_frontend.​tolerations | list | `[]` | | +| ruler.​affinity | object | `{}` | | +| ruler.​annotations | object | `{}` | | +| ruler.​containerSecurityContext.​enabled | bool | `true` | | +| ruler.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ruler.​directories | object | `{}` | allow configuring rules via configmap. ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html | +| ruler.​enabled | bool | `true` | | +| ruler.​env | list | `[]` | | +| ruler.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) | +| ruler.​extraContainers | list | `[]` | | +| ruler.​extraPorts | list | `[]` | | +| ruler.​extraVolumeMounts | list | `[]` | | +| ruler.​extraVolumes | list | `[]` | | +| ruler.​initContainers | list | `[]` | | +| ruler.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​nodeSelector | object | `{}` | | +| ruler.​persistentVolume.​subPath | string | `nil` | | +| ruler.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ruler.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ruler.​podLabels | object | `{}` | Pod Labels | +| ruler.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​replicas | int | `1` | | +| ruler.​resources | object | `{}` | | +| ruler.​securityContext | object | `{}` | | +| ruler.​service.​annotations | object | `{}` | | +| ruler.​service.​labels | object | `{}` | | +| ruler.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| ruler.​serviceMonitor.​additionalLabels | object | `{}` | | +| ruler.​serviceMonitor.​enabled | bool | `false` | | +| ruler.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ruler.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ruler.​serviceMonitor.​relabelings | list | `[]` | | +| ruler.​sidecar | object | 
`{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/tmp/rules","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_rules","labelValue":null,"resources":{},"searchNamespace":null,"watchMethod":null}` | Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders | +| ruler.​sidecar.​defaultFolderName | string | `nil` | The default folder name, it will create a subfolder under the `folder` and put rules in there instead | +| ruler.​sidecar.​folder | string | `"/tmp/rules"` | folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) | +| ruler.​sidecar.​folderAnnotation | string | `nil` | If specified, the sidecar will look for annotation with this name to create folder and put graph here. You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. | +| ruler.​sidecar.​label | string | `"cortex_rules"` | label that the configmaps with rules are marked with | +| ruler.​sidecar.​labelValue | string | `nil` | value of label that the configmaps with rules are set to | +| ruler.​sidecar.​searchNamespace | string | `nil` | If specified, the sidecar will search for rules config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | +| ruler.​startupProbe.​failureThreshold | int | `10` | | +| ruler.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ruler.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ruler.​strategy.​type | string | `"RollingUpdate"` | | +| ruler.​terminationGracePeriodSeconds | int | `180` | | +| ruler.​tolerations | list | `[]` | | +| runtimeconfigmap.​annotations | object | `{}` | | +| runtimeconfigmap.​create | bool | `true` | If true, a configmap for the `runtime_config` will be created. If false, the configmap _must_ exist already on the cluster or pods will fail to create. | +| runtimeconfigmap.​runtime_config | object | `{}` | https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file | +| serviceAccount.​annotations | object | `{}` | | +| serviceAccount.​automountServiceAccountToken | bool | `true` | | +| serviceAccount.​create | bool | `true` | | +| serviceAccount.​name | string | `nil` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"store-gateway"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` 
| | +| store_gateway.​annotations | object | `{}` | | +| store_gateway.​containerSecurityContext.​enabled | bool | `true` | | +| store_gateway.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| store_gateway.​env | list | `[]` | | +| store_gateway.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| store_gateway.​extraContainers | list | `[]` | | +| store_gateway.​extraPorts | list | `[]` | | +| store_gateway.​extraVolumeMounts | list | `[]` | | +| store_gateway.​extraVolumes | list | `[]` | | +| store_gateway.​initContainers | list | `[]` | | +| store_gateway.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​nodeSelector | object | `{}` | | +| store_gateway.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Store-gateway data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| store_gateway.​persistentVolume.​annotations | object | `{}` | Store-gateway data Persistent Volume Claim annotations | +| store_gateway.​persistentVolume.​enabled | bool | `true` | If true Store-gateway will create/use a Persistent Volume Claim If false, use emptyDir | +| store_gateway.​persistentVolume.​size | string | `"2Gi"` | Store-gateway data Persistent Volume size | +| store_gateway.​persistentVolume.​storageClass | string | `nil` | Store-gateway data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| store_gateway.​persistentVolume.​subPath | string | `""` | Subdirectory of Store-gateway data Persistent Volume to mount Useful if the volume's root directory is not empty | +| store_gateway.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| store_gateway.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| store_gateway.​podLabels | object | `{}` | Pod Labels | +| store_gateway.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​replicas | int | `1` | | +| store_gateway.​resources | object | `{}` | | +| store_gateway.​securityContext | object | `{}` | | +| store_gateway.​service.​annotations | object | `{}` | | +| store_gateway.​service.​labels | object | `{}` | | +| store_gateway.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| store_gateway.​serviceMonitor.​additionalLabels | object | `{}` | | +| store_gateway.​serviceMonitor.​enabled | bool | `false` | | +| store_gateway.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| store_gateway.​serviceMonitor.​metricRelabelings | list | `[]` | | +| store_gateway.​serviceMonitor.​relabelings | list | `[]` | | +| store_gateway.​startupProbe.​failureThreshold | int | `60` | | +| store_gateway.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​startupProbe.​initialDelaySeconds | int | `120` | | +| store_gateway.​startupProbe.​periodSeconds | int | `30` | | +| store_gateway.​strategy.​type | string | `"RollingUpdate"` | | +| 
store_gateway.​terminationGracePeriodSeconds | int | `240` | | +| store_gateway.​tolerations | list | `[]` | | +| table_manager.​affinity | object | `{}` | | +| table_manager.​annotations | object | `{}` | | +| table_manager.​containerSecurityContext.​enabled | bool | `true` | | +| table_manager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| table_manager.​env | list | `[]` | | +| table_manager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| table_manager.​extraContainers | list | `[]` | | +| table_manager.​extraPorts | list | `[]` | | +| table_manager.​extraVolumeMounts | list | `[]` | | +| table_manager.​extraVolumes | list | `[]` | | +| table_manager.​initContainers | list | `[]` | | +| table_manager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​nodeSelector | object | `{}` | | +| table_manager.​persistentVolume.​subPath | string | `nil` | | +| table_manager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| table_manager.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| table_manager.​podLabels | object | `{}` | Pod Labels | +| table_manager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​replicas | int | `1` | | +| table_manager.​resources | object | `{}` | | +| table_manager.​securityContext | object | `{}` | | +| table_manager.​service.​annotations | object | `{}` | | +| table_manager.​service.​labels | object | `{}` | | +| table_manager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| table_manager.​serviceMonitor.​additionalLabels | object | `{}` | | +| table_manager.​serviceMonitor.​enabled | bool 
| `false` | | +| table_manager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| table_manager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| table_manager.​serviceMonitor.​relabelings | list | `[]` | | +| table_manager.​startupProbe.​failureThreshold | int | `10` | | +| table_manager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| table_manager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| table_manager.​strategy.​type | string | `"RollingUpdate"` | | +| table_manager.​terminationGracePeriodSeconds | int | `180` | | +| table_manager.​tolerations | list | `[]` | | +| tags.​blocks-storage-memcached | bool | `false` | Set to true to enable block storage memcached caching | +| useConfigMap | bool | `false` | | +| useExternalConfig | bool | `false` | | + diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/NOTES.txt b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/NOTES.txt new file mode 100644 index 0000000..1bd3203 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/NOTES.txt @@ -0,0 +1,9 @@ +{{- if eq .Values.config.storage.engine "chunks" }} +Cortex chunks storage has been deprecated, and it's now in maintenance mode: all Cortex users are encouraged to migrate to the blocks storage. +No new features will be added to the chunks storage. +Unlike the official cortex default configuration this helm-chart does not run the chunk engine by default. +{{- end }} + +Verify the application is working by running these commands: + kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ include "cortex.querierFullname" . 
}} {{ .Values.config.server.http_listen_port }} + curl http://127.0.0.1:{{ .Values.config.server.http_listen_port }}/services diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/_helpers.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/_helpers.tpl new file mode 100644 index 0000000..81914c9 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/_helpers.tpl @@ -0,0 +1,155 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cortex.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cortex.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cortex.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "cortex.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cortex.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the app name of cortex clients. Defaults to the same logic as "cortex.fullname", and default client expects "prometheus". 
+*/}} +{{- define "client.name" -}} +{{- if .Values.client.name -}} +{{- .Values.client.name -}} +{{- else if .Values.client.fullnameOverride -}} +{{- .Values.client.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default "prometheus" .Values.client.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "cortex.labels" -}} +helm.sh/chart: {{ include "cortex.chart" . }} +{{ include "cortex.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "cortex.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cortex.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create configuration parameters for memcached configuration +*/}} +{{- define "cortex.memcached" -}} +{{- if and (eq .Values.config.storage.engine "blocks") (index .Values "tags" "blocks-storage-memcached") }} +- "-blocks-storage.bucket-store.index-cache.backend=memcached" +- "-blocks-storage.bucket-store.index-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-index.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.chunks-cache.backend=memcached" +- "-blocks-storage.bucket-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.metadata-cache.backend=memcached" +- "-blocks-storage.bucket-store.metadata-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-metadata.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne 
.Values.config.storage.engine "blocks") .Values.memcached.enabled }} +- "-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-read" "enabled") }} +- "-store.index-cache-read.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-read.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-write" "enabled") }} +- "-store.index-cache-write.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-write.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Create configuration for frontend memcached configuration +*/}} +{{- define "cortex.frontend-memcached" -}} +{{- if index .Values "memcached-frontend" "enabled" }} +- "-frontend.memcached.addresses=dns+{{ template "cortex.fullname" . }}-memcached-frontend.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Determine the policy api version +*/}} +{{- define "cortex.pdbVersion" -}} +{{- if or (.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget") (semverCompare ">=1.21" .Capabilities.KubeVersion.Version) -}} +policy/v1 +{{- else -}} +policy/v1beta1 +{{- end -}} +{{- end -}} + +{{/* +Get checksum of config secret or configMap +*/}} +{{- define "cortex.configChecksum" -}} +{{- if .Values.useExternalConfig -}} +{{- .Values.externalConfigVersion -}} +{{- else if .Values.useConfigMap -}} +{{- include (print $.Template.BasePath "/configmap.yaml") . | sha256sum -}} +{{- else -}} +{{- include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum -}} +{{- end -}} +{{- end -}} + +{{/* +Get volume of config secret of configMap +*/}} +{{- define "cortex.configVolume" -}} +- name: config + {{- if .Values.useExternalConfig }} + secret: + secretName: {{ .Values.externalConfigSecretName }} + {{- else if .Values.useConfigMap }} + configMap: + name: {{ template "cortex.fullname" . }}-config + {{- else }} + secret: + secretName: {{ template "cortex.fullname" . }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml new file mode 100644 index 0000000..49c4ca7 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alertmanager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + name: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - name: alertmanager +# image: quay.io/cortexproject/cortex:v1.9.0 +# image: registry.cloud.intermax:5000/library/cortex:v1.11.0 + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cortex:v1.11.0 + imagePullPolicy: IfNotPresent + args: + - -target=alertmanager +# - -log.level=debug + - -server.http-listen-port=80 + - -alertmanager.configs.url=http://{{ template "cortex.fullname" . 
}}-configs:8080 + - -alertmanager.web.external-url=/alertmanager + ports: + - containerPort: 80 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 0000000..989feb2 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: alertmanager +spec: + ports: + - port: 80 + selector: + name: alertmanager diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml new file mode 100644 index 0000000..cf7f25a --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml @@ -0,0 +1,12 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "cortex.fullname" . }}-clusterrole + labels: + {{- include "cortex.labels" . | nindent 4 }} +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..c1d9884 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cortex.fullname" . 
}}-clusterrolebinding + labels: + {{- include "cortex.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cortex.fullname" . }}-clusterrole +subjects: + - kind: ServiceAccount + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl new file mode 100644 index 0000000..f89b33c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl @@ -0,0 +1,23 @@ + +{{/* +compactor fullname +*/}} +{{- define "cortex.compactorFullname" -}} +{{ include "cortex.fullname" . }}-compactor +{{- end }} + +{{/* +compactor common labels +*/}} +{{- define "cortex.compactorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: compactor +{{- end }} + +{{/* +compactor selector labels +*/}} +{{- define "cortex.compactorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: compactor +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml new file mode 100644 index 0000000..8634e4c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.compactor.replicas) 1) (.Values.compactor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . 
| nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.compactor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml new file mode 100644 index 0000000..a33e849 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.compactor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + {{- if .Values.compactor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.compactor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.compactor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.compactor.serviceMonitor.interval }} + interval: {{ .Values.compactor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.compactor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.compactor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.compactor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.compactor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 0000000..c0a1baf --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,141 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.compactor.annotations | nindent 4 }} +spec: + replicas: {{ .Values.compactor.replicas }} + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.compactor.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . 
}}-compactor + {{- if .Values.compactor.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.compactor.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.compactor.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.compactor.persistentVolume.storageClass }} + {{- if (eq "-" .Values.compactor.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.compactor.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.compactor.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.compactor.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.compactorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.compactor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.compactor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.compactor.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.compactor.priorityClassName }} + priorityClassName: {{ .Values.compactor.priorityClassName }} + {{- end }} + {{- if .Values.compactor.securityContext.enabled }} + securityContext: {{- omit .Values.compactor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.compactor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.compactor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.compactor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.compactor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.compactor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.compactor.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.compactor.extraVolumes }} + {{- toYaml .Values.compactor.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.compactor.extraContainers }} + {{ toYaml .Values.compactor.extraContainers | nindent 8 }} + {{- end }} + - name: compactor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=compactor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.compactor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.compactor.extraVolumeMounts }} + {{- toYaml .Values.compactor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.compactor.persistentVolume.subPath }} + subPath: {{ .Values.compactor.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.compactor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.compactor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.compactor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.compactor.resources | nindent 12 }} + {{- if .Values.compactor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.compactor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.compactor.env }} + env: + {{- toYaml .Values.compactor.env | nindent 12 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml new file mode 100644 index 0000000..ae20f78 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml @@ -0,0 +1,25 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.compactorFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + {{- with .Values.compactor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.compactor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.compactorSelectorLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configmap.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configmap.yaml new file mode 100644 index 0000000..001b13a --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configmap.yaml @@ -0,0 +1,12 @@ +{{- if (and (not .Values.useExternalConfig) (.Values.useConfigMap)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: | + {{- tpl (toYaml .Values.config) . | nindent 4 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl new file mode 100644 index 0000000..c8945dc --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl @@ -0,0 +1,23 @@ + +{{/* +configs fullname +*/}} +{{- define "cortex.configsFullname" -}} +{{ include "cortex.fullname" . }}-configs +{{- end }} + +{{/* +configs common labels +*/}} +{{- define "cortex.configsLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: configs +{{- end }} + +{{/* +configs selector labels +*/}} +{{- define "cortex.configsSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: configs +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml new file mode 100644 index 0000000..86048ce --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml @@ -0,0 +1,124 @@ +{{- if .Values.configs.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.configs.annotations | nindent 4 }} +spec: + replicas: {{ .Values.configs.replicas }} + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.configs.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.configsLabels" . | nindent 8 }} + {{- with .Values.configs.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.configs.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.configs.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.configs.priorityClassName }} + priorityClassName: {{ .Values.configs.priorityClassName }} + {{- end }} + {{- if .Values.configs.securityContext.enabled }} + securityContext: {{- omit .Values.configs.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.configs.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: configs + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=configs" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configsdb_postgresql.enabled }} + - "-configs.database.uri={{ .Values.configsdb_postgresql.uri }}" + - "-configs.database.password-file=/etc/postgresql/password" + - "-configs.database.migrations-dir=/migrations" + {{- else }} + - "-configs.database.uri=memory://" + {{- end }} + {{- range $key, $value := .Values.configs.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/cortex + subPath: {{ .Values.configs.persistentVolume.subPath }} + - name: runtime-config + mountPath: /etc/cortex-runtime-config + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + mountPath: /etc/postgresql + {{- end }} + {{- if .Values.configs.extraVolumeMounts }} + {{- toYaml .Values.configs.extraVolumeMounts | nindent 12}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.configs.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.configs.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.configs.readinessProbe | nindent 12 }} + 
resources: + {{- toYaml .Values.configs.resources | nindent 12 }} + {{- if .Values.configs.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.configs.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.configs.env }} + env: + {{- toYaml .Values.configs.env | nindent 12 }} + {{- end }} + {{- if .Values.configs.extraContainers }} + {{- toYaml .Values.configs.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.configs.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.configs.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.configs.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + secret: + secretName: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.name }}{{ else }}{{ template "cortex.fullname" . }}-postgresql{{ end }} + items: + - key: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.key }}{{ else }}postgresql-password{{ end }} + path: password + {{- end }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.configs.extraVolumes }} + {{- toYaml .Values.configs.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml new file mode 100644 index 0000000..b6e46b4 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.configs.replicas) 1) (.Values.configs.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.configs.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml new file mode 100644 index 0000000..393bc32 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.configs.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . 
| nindent 4 }} + {{- if .Values.configs.serviceMonitor.additionalLabels }} +{{ toYaml .Values.configs.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.configs.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.configs.serviceMonitor.interval }} + interval: {{ .Values.configs.serviceMonitor.interval }} + {{- end }} + {{- if .Values.configs.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.configs.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.configs.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.configs.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.configs.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.configs.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml new file mode 100644 index 0000000..6dbc2cd --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.configs.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + {{- with .Values.configs.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.configs.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.configsSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml new file mode 100644 index 0000000..472f83e --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-0 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH1 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-1 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH2 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-2 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + 
path: {{ .Values.global.IMXC_INGESTER_PV_PATH3 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl new file mode 100644 index 0000000..24e8d00 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl @@ -0,0 +1,23 @@ + +{{/* +distributor fullname +*/}} +{{- define "cortex.distributorFullname" -}} +{{ include "cortex.fullname" . }}-distributor +{{- end }} + +{{/* +distributor common labels +*/}} +{{- define "cortex.distributorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: distributor +{{- end }} + +{{/* +distributor selector labels +*/}} +{{- define "cortex.distributorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: distributor +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml new file mode 100644 index 0000000..fc9c0ba --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.distributor.annotations | nindent 4 }} +spec: + {{- if not .Values.distributor.autoscaling.enabled }} + replicas: {{ .Values.distributor.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.distributor.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.distributorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.distributor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.distributor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.distributor.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.distributor.priorityClassName }} + priorityClassName: {{ .Values.distributor.priorityClassName }} + {{- end }} + {{- if .Values.distributor.securityContext.enabled }} + securityContext: {{- omit .Values.distributor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.distributor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: distributor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=distributor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.distributor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.distributor.extraVolumeMounts }} + {{- toYaml .Values.distributor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.distributor.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.distributor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.distributor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.distributor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.distributor.resources | nindent 12 }} + {{- if .Values.distributor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.distributor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.distributor.env }} + env: + {{- toYaml .Values.distributor.env | nindent 12 }} + {{- end }} + {{- with .Values.distributor.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }}
+          {{- end }}
+          # NOTE(review): removed a duplicate 'resources:' key that hard-coded
+          # 'requests: cpu: 100m' — this container already sets resources from
+          # .Values.distributor.resources above; duplicate mapping keys make the
+          # rendered manifest invalid YAML and are rejected on apply.
+        {{- if .Values.distributor.extraContainers }}
+        {{- toYaml .Values.distributor.extraContainers | nindent 8}}
+        {{- end }}
+      nodeSelector:
+        {{- toYaml .Values.distributor.nodeSelector | nindent 8 }}
+      affinity:
+        {{- toYaml .Values.distributor.affinity | nindent 8 }}
+      tolerations:
+        {{- toYaml .Values.distributor.tolerations | nindent 8 }}
+      terminationGracePeriodSeconds: {{ .Values.distributor.terminationGracePeriodSeconds }}
+      volumes:
+        {{- include "cortex.configVolume" . | nindent 8 }}
+        - name: runtime-config
+          configMap:
+            name: {{ template "cortex.fullname" . }}-runtime-config
+        - name: storage
+          emptyDir: {}
+        {{- if .Values.distributor.extraVolumes }}
+        {{- toYaml .Values.distributor.extraVolumes | nindent 8}}
+        {{- end }}
diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml
new file mode 100644
index 0000000..0c1c9f6
--- /dev/null
+++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml
@@ -0,0 +1,39 @@
+{{- with .Values.distributor.autoscaling -}}
+{{- if .enabled }}
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "cortex.distributorFullname" $ }}
+  namespace: {{ $.Release.Namespace }}
+  labels:
+    {{- include "cortex.distributorLabels" $ | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "cortex.distributorFullname" $ }}
+  minReplicas: {{ .minReplicas }}
+  maxReplicas: {{ .maxReplicas }}
+  metrics:
+    {{- with .targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ .
}} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml new file mode 100644 index 0000000..7b05701 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.distributor.replicas) 1) (.Values.distributor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.distributor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml new file mode 100644 index 0000000..5db8389 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.distributor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . 
| nindent 4 }} + {{- if .Values.distributor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.distributor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.distributor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.distributor.serviceMonitor.interval }} + interval: {{ .Values.distributor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.distributor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.distributor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.distributor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.distributor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 0000000..1c4f7f6 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.distributorSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml new file mode 100644 index 0000000..2db7197 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.distributorSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl new file mode 100644 index 0000000..4705327 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl @@ -0,0 +1,23 @@ + +{{/* +ingester fullname +*/}} +{{- define "cortex.ingesterFullname" -}} +{{ include "cortex.fullname" . 
}}-ingester +{{- end }} + +{{/* +ingester common labels +*/}} +{{- define "cortex.ingesterLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ingester +{{- end }} + +{{/* +ingester selector labels +*/}} +{{- define "cortex.ingesterSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ingester +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml new file mode 100644 index 0000000..b26d3a3 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml @@ -0,0 +1,130 @@ +{{- if not .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.ingester.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}}
+          {{- end }}
+          ports:
+            - name: http-metrics
+              containerPort: {{ .Values.config.server.http_listen_port }}
+              protocol: TCP
+            - name: grpc
+              containerPort: {{ .Values.config.server.grpc_listen_port }}
+              protocol: TCP
+            - name: gossip
+              containerPort: {{ .Values.config.memberlist.bind_port }}
+              protocol: TCP
+          {{- if .Values.ingester.startupProbe }}
+          startupProbe:
+            {{- toYaml .Values.ingester.startupProbe | nindent 12 }}
+          {{- end }}
+          {{- if .Values.ingester.livenessProbe }}
+          livenessProbe:
+            {{- toYaml .Values.ingester.livenessProbe | nindent 12 }}
+          {{- end }}
+          readinessProbe:
+            {{- toYaml .Values.ingester.readinessProbe | nindent 12 }}
+          resources:
+            {{- toYaml .Values.ingester.resources | nindent 12 }}
+          {{- if .Values.ingester.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          env:
+            {{- if .Values.ingester.env }}
+            {{ toYaml .Values.ingester.env | nindent 12 }}
+            {{- end }}
+          {{- with .Values.ingester.lifecycle }}
+          lifecycle:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          # NOTE(review): removed a duplicate 'resources:' key that hard-coded
+          # 'requests: cpu: 100m' — this container already sets resources from
+          # .Values.ingester.resources above; duplicate mapping keys make the
+          # rendered manifest invalid YAML and are rejected on apply.
+        {{- with .Values.ingester.extraContainers }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+      nodeSelector:
+        {{- toYaml .Values.ingester.nodeSelector | nindent 8 }}
+      affinity:
+        {{- toYaml .Values.ingester.affinity | nindent 8 }}
+      tolerations:
+        {{- toYaml .Values.ingester.tolerations | nindent 8 }}
+      terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }}
+      volumes:
+        {{- include "cortex.configVolume" . | nindent 8 }}
+        - name: runtime-config
+          configMap:
+            name: {{ template "cortex.fullname" . 
}}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml new file mode 100644 index 0000000..97c5290 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml @@ -0,0 +1,29 @@ +{{- with .Values.ingester.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.ingesterFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ if $.Values.ingester.statefulSet.enabled }}StatefulSet{{ else }}Deployment{{ end }} + name: {{ include "cortex.ingesterFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .targetMemoryUtilizationPercentage }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml new file mode 100644 index 0000000..a47ecb4 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ingester.replicas) 1) (.Values.ingester.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.ingester.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml new file mode 100644 index 0000000..310ca54 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingester.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- if .Values.ingester.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ingester.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ingester.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ingester.serviceMonitor.interval }} + interval: {{ .Values.ingester.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ingester.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ingester.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ingester.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ingester.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml new file mode 100644 index 0000000..8016441 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,153 @@ +{{- if .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + updateStrategy: + {{- toYaml .Values.ingester.statefulStrategy | nindent 4 }} + podManagementPolicy: "{{ .Values.ingester.statefulSet.podManagementPolicy }}" + serviceName: {{ template "cortex.fullname" . }}-ingester-headless + {{- if .Values.ingester.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.ingester.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.ingester.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.ingester.persistentVolume.storageClass }} + {{- if (eq "-" .Values.ingester.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.ingester.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.ingester.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.ingester.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.ingester.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8 }} + {{- end }} + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ingester.env }} + env: + {{- toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 0000000..b783caa --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml new file mode 100644 index 0000000..02183ae --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl new file mode 100644 index 0000000..61d8b78 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl @@ -0,0 +1,23 @@ + +{{/* +nginx fullname +*/}} +{{- define "cortex.nginxFullname" -}} +{{ include "cortex.fullname" . }}-nginx +{{- end }} + +{{/* +nginx common labels +*/}} +{{- define "cortex.nginxLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: nginx +{{- end }} + +{{/* +nginx selector labels +*/}} +{{- define "cortex.nginxSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: nginx +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml new file mode 100644 index 0000000..fd3474d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml @@ -0,0 +1,140 @@ +{{- if .Values.nginx.enabled }} +{{- $rootDomain := printf "%s.svc.%s:%d" .Release.Namespace .Values.clusterDomain (.Values.config.server.http_listen_port | int) }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +data: + nginx.conf: |- + worker_processes 5; ## Default: 1 + error_log /dev/stderr; + pid /tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + {{- with .Values.nginx.config.mainSnippet }} + {{ tpl . $ | nindent 4 }} + {{- end }} + + http { + default_type application/octet-stream; + client_max_body_size {{.Values.nginx.config.client_max_body_size}}; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" $http_x_scope_orgid'; + access_log /dev/stderr main; + sendfile on; + tcp_nopush on; + resolver {{ default (printf "coredns.kube-system.svc.%s" .Values.clusterDomain ) .Values.nginx.config.dnsResolver }}; + + {{- with .Values.nginx.config.httpSnippet }} + {{ tpl . 
$ | nindent 6 }} + {{- end }} + + server { # simple reverse-proxy + listen {{ .Values.nginx.http_listen_port }}; + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + proxy_http_version 1.1; + proxy_set_header X-Scope-OrgID 0; + + {{- range $key, $value := .Values.nginx.config.setHeaders }} + proxy_set_header {{ $key }} {{ $value }}; + {{- end }} + + {{ if .Values.nginx.config.basicAuthSecretName -}} + auth_basic "Restricted Content"; + auth_basic_user_file /etc/apache2/.htpasswd; + {{- end }} + + {{- with .Values.nginx.config.serverSnippet }} + {{ tpl . $ | nindent 8 }} + {{- end }} + + location = /healthz { + # auth_basic off is not set here, even when a basic auth directive is + # included in the server block, as Nginx's NGX_HTTP_REWRITE_PHASE + # (point when this return statement is evaluated) comes before the + # NGX_HTTP_ACCESS_PHASE (point when basic auth is evaluated). Thus, + # this return statement returns a response before basic auth is + # evaluated. + return 200 'alive'; + } + + # Distributor Config + location = /ring { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /all_user_stats { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /api/prom/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + ## New Remote write API. Ref: https://cortexmetrics.io/docs/api/#remote-write + location = /api/v1/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + # Alertmanager Config + location ~ /api/prom/alertmanager/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /api/v1/alerts { + proxy_pass http://{{ template "cortex.fullname" . 
}}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /multitenant_alertmanager/status { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + # Ruler Config + location ~ /api/v1/rules { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + location ~ /ruler/ring { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + # Config Config + location ~ /api/prom/configs/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-configs.{{ $rootDomain }}$request_uri; + } + + # Query Config + location ~ /api/prom/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + ## New Query frontend APIs as per https://cortexmetrics.io/docs/api/#querier--query-frontend + location ~ ^{{.Values.config.api.prometheus_http_prefix}}/api/v1/(read|metadata|labels|series|query_range|query) { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + location ~ {{.Values.config.api.prometheus_http_prefix}}/api/v1/label/.* { + proxy_pass http://{{ template "cortex.fullname" . 
}}-query-frontend.{{ $rootDomain }}$request_uri; + } + {{- if and (.Values.config.auth_enabled) (.Values.nginx.config.auth_orgs) }} + # Auth orgs + {{- range $org := compact .Values.nginx.config.auth_orgs | uniq }} + location = /api/v1/push/{{ $org }} { + proxy_set_header X-Scope-OrgID {{ $org }}; + proxy_pass http://{{ template "cortex.fullname" $ }}-distributor.{{ $rootDomain }}/api/v1/push; + } + {{- end }} + {{- end }} + } + } +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml new file mode 100644 index 0000000..bbd3a9d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml @@ -0,0 +1,111 @@ +{{- if .Values.nginx.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.nginx.annotations | nindent 4 }} +spec: + {{- if not .Values.nginx.autoscaling.enabled }} + replicas: {{ .Values.nginx.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.nginx.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.nginxLabels" . | nindent 8 }} + {{- with .Values.nginx.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/nginx/nginx-config.yaml") . | sha256sum }} + {{- with .Values.nginx.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.nginx.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.nginx.priorityClassName }} + priorityClassName: {{ .Values.nginx.priorityClassName }} + {{- end }} + {{- if .Values.nginx.securityContext.enabled }} + securityContext: {{- omit .Values.nginx.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.nginx.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: nginx + image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + {{- if .Values.nginx.extraArgs }} + args: + {{- range $key, $value := .Values.nginx.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.nginx.extraVolumeMounts }} + {{- toYaml .Values.nginx.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + mountPath: /etc/apache2 + readOnly: true + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.nginx.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.nginx.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.nginx.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.nginx.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.nginx.resources | nindent 12 }} + {{- if .Values.nginx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.nginx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.nginx.env }} + env: + {{- toYaml .Values.nginx.env | nindent 12 }} + {{- end }} + {{- if .Values.nginx.extraContainers }} + {{ toYaml .Values.nginx.extraContainers | indent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.nginx.nodeSelector | nindent 8 }} + affinity: + {{- toYaml 
.Values.nginx.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.nginx.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.nginx.terminationGracePeriodSeconds }} + volumes: + - name: config + configMap: + name: {{ template "cortex.fullname" . }}-nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + secret: + defaultMode: 420 + secretName: {{ .Values.nginx.config.basicAuthSecretName }} + {{- end }} + {{- if .Values.nginx.extraVolumes }} + {{- toYaml .Values.nginx.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml new file mode 100644 index 0000000..b93a13d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.nginx.enabled .Values.nginx.autoscaling.enabled }} +{{- with .Values.nginx.autoscaling -}} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.nginxFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.nginxFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml new file mode 100644 index 0000000..51e6609 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.ingress.enabled .Values.nginx.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.ingress.annotations | nindent 4 }} +spec: +{{- if .Values.ingress.ingressClass.enabled }} + ingressClassName: {{ .Values.ingress.ingressClass.name }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + pathType: "Prefix" + backend: + service: + name: {{ include "cortex.nginxFullname" $ }} + port: + number: {{ $.Values.nginx.http_listen_port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml new file mode 100644 index 0000000..959764a --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (.Values.nginx.enabled) (gt (int .Values.nginx.replicas) 1) (.Values.nginx.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.nginx.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml new file mode 100644 index 0000000..72a2c44 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.nginx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + {{- with .Values.nginx.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.nginx.service.annotations | nindent 4 }} +spec: + type: {{ .Values.nginx.service.type }} + ports: + - port: {{ .Values.nginx.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.nginxSelectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml new file mode 100644 index 0000000..7bb3983 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: 'true' + labels: + app: node-exporter + name: node-exporter + name: node-exporter + namespace: imxc +spec: + clusterIP: None + ports: + - name: scrape + port: 9100 + protocol: TCP + selector: + app: node-exporter + type: ClusterIP +--- +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: DaemonSet +metadata: + name: node-exporter + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: node-exporter +{{- end }} + template: + metadata: + labels: + app: node-exporter + name: node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/node-exporter + name: node-exporter + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + ports: + - containerPort: 9100 + hostPort: 9100 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + 
readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl new file mode 100644 index 0000000..c0a6204 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl @@ -0,0 +1,23 @@ + +{{/* +querier fullname +*/}} +{{- define "cortex.querierFullname" -}} +{{ include "cortex.fullname" . }}-querier +{{- end }} + +{{/* +querier common labels +*/}} +{{- define "cortex.querierLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: querier +{{- end }} + +{{/* +querier selector labels +*/}} +{{- define "cortex.querierSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: querier +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml new file mode 100644 index 0000000..a84ba8a --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml @@ -0,0 +1,115 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.querier.annotations | nindent 4 }} +spec: + {{- if not .Values.querier.autoscaling.enabled }} + replicas: {{ .Values.querier.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . 
| nindent 6 }} + strategy: + {{- toYaml .Values.querier.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.querierLabels" . | nindent 8 }} + {{- with .Values.querier.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.querier.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.querier.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.querier.priorityClassName }} + priorityClassName: {{ .Values.querier.priorityClassName }} + {{- end }} + {{- if .Values.querier.securityContext.enabled }} + securityContext: {{- omit .Values.querier.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.querier.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: querier + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=querier" + - "-config.file=/etc/cortex/cortex.yaml" + - "-querier.frontend-address={{ template "cortex.fullname" . }}-query-frontend-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.grpc_listen_port }}" + {{- include "cortex.memcached" . 
| nindent 12}}
+          {{- range $key, $value := .Values.querier.extraArgs }}
+          - "-{{ $key }}={{ $value }}"
+          {{- end }}
+          volumeMounts:
+          {{- if .Values.querier.extraVolumeMounts }}
+          {{- toYaml .Values.querier.extraVolumeMounts | nindent 12}}
+          {{- end }}
+          - name: config
+            mountPath: /etc/cortex
+          - name: runtime-config
+            mountPath: /etc/cortex-runtime-config
+          - name: storage
+            mountPath: "/data"
+            subPath: {{ .Values.querier.persistentVolume.subPath }}
+          ports:
+            - name: http-metrics
+              containerPort: {{ .Values.config.server.http_listen_port }}
+              protocol: TCP
+          startupProbe:
+            {{- toYaml .Values.querier.startupProbe | nindent 12 }}
+          livenessProbe:
+            {{- toYaml .Values.querier.livenessProbe | nindent 12 }}
+          readinessProbe:
+            {{- toYaml .Values.querier.readinessProbe | nindent 12 }}
+          resources:
+            {{- toYaml .Values.querier.resources | nindent 12 }}
+          {{- if .Values.querier.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.querier.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          env:
+            {{- if .Values.querier.env }}
+            {{- toYaml .Values.querier.env | nindent 12 }}
+            {{- end }}
+          {{- with .Values.querier.lifecycle }}
+          lifecycle:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- /* NOTE(review): a second, hard-coded `resources: { requests: { cpu: "100m" } }` block
+               was removed here — it duplicated the `resources:` key above and silently
+               overrode the user-configurable .Values.querier.resources. */}}
+      {{- if .Values.querier.extraContainers }}
+      {{- toYaml .Values.querier.extraContainers | nindent 8}}
+      {{- end }}
+      nodeSelector:
+        {{- toYaml .Values.querier.nodeSelector | nindent 8 }}
+      affinity:
+        {{- toYaml .Values.querier.affinity | nindent 8 }}
+      tolerations:
+        {{- toYaml .Values.querier.tolerations | nindent 8 }}
+      terminationGracePeriodSeconds: {{ .Values.querier.terminationGracePeriodSeconds }}
+      volumes:
+        {{- include "cortex.configVolume" . | nindent 8 }}
+        - name: runtime-config
+          configMap:
+            name: {{ template "cortex.fullname" .
}}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.querier.extraVolumes }} + {{- toYaml .Values.querier.extraVolumes | nindent 8}} + {{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml new file mode 100644 index 0000000..f078526 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.querier.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.querierFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.querierLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.querierFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml new file mode 100644 index 0000000..b69de62 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.querier.replicas) 1) (.Values.querier.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.querier.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml new file mode 100644 index 0000000..c84d1a4 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.querier.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . 
| nindent 4 }} + {{- if .Values.querier.serviceMonitor.additionalLabels }} +{{ toYaml .Values.querier.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.querier.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.querier.serviceMonitor.interval }} + interval: {{ .Values.querier.serviceMonitor.interval }} + {{- end }} + {{- if .Values.querier.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.querier.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.querier.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.querier.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.querier.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.querier.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml new file mode 100644 index 0000000..0701b7d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + {{- with .Values.querier.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.querier.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.querierSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl new file mode 100644 index 0000000..c1f74c9 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl @@ -0,0 +1,23 @@ + +{{/* +query-frontend fullname +*/}} +{{- define "cortex.queryFrontendFullname" -}} +{{ include "cortex.fullname" . }}-query-frontend +{{- end }} + +{{/* +query-frontend common labels +*/}} +{{- define "cortex.queryFrontendLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} + +{{/* +query-frontend selector labels +*/}} +{{- define "cortex.queryFrontendSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 0000000..3e31d18 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,107 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . 
| nindent 4 }} + annotations: + {{- toYaml .Values.query_frontend.annotations | nindent 4 }} +spec: + replicas: {{ .Values.query_frontend.replicas }} + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.query_frontend.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 8 }} + {{- with .Values.query_frontend.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.query_frontend.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.query_frontend.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.query_frontend.priorityClassName }} + priorityClassName: {{ .Values.query_frontend.priorityClassName }} + {{- end }} + {{- if .Values.query_frontend.securityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.query_frontend.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: query-frontend + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=query-frontend" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.frontend-memcached" . 
| nindent 12 }} + {{- range $key, $value := .Values.query_frontend.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.query_frontend.extraVolumeMounts }} + {{- toYaml .Values.query_frontend.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.query_frontend.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.query_frontend.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.query_frontend.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.query_frontend.resources | nindent 12 }} + {{- if .Values.query_frontend.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.env }} + env: + {{- toYaml .Values.query_frontend.env | nindent 12 }} + {{- end }} + {{- with .Values.query_frontend.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.extraContainers }} + {{- toYaml .Values.query_frontend.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.query_frontend.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.query_frontend.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.query_frontend.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.query_frontend.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.query_frontend.extraVolumes }} + {{- toYaml .Values.query_frontend.extraVolumes | nindent 8}} + {{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml new file mode 100644 index 0000000..2d76c6b --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.query_frontend.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- if .Values.query_frontend.serviceMonitor.additionalLabels }} +{{ toYaml .Values.query_frontend.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.query_frontend.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.query_frontend.serviceMonitor.interval }} + interval: {{ .Values.query_frontend.serviceMonitor.interval }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.query_frontend.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.query_frontend.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml new file mode 100644 index 0000000..939457c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 4 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 0000000..85ff2e8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml new file mode 100644 index 0000000..5256949 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.query_frontend.replicas) 1) (.Values.query_frontend.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.query_frontend.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl new file mode 100644 index 0000000..86270d0 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl @@ -0,0 +1,30 @@ + +{{/* +ruler fullname +*/}} +{{- define "cortex.rulerFullname" -}} +{{ include "cortex.fullname" . }}-ruler +{{- end }} + +{{/* +ruler common labels +*/}} +{{- define "cortex.rulerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +ruler selector labels +*/}} +{{- define "cortex.rulerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +format rules dir +*/}} +{{- define "cortex.rulerRulesDirName" -}} +rules-{{ . 
| replace "_" "-" | trimSuffix "-" }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml new file mode 100644 index 0000000..8448108 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.ruler.enabled }} +{{- range $dir, $files := .Values.ruler.directories }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" $ | nindent 4 }} +data: + {{- toYaml $files | nindent 2}} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml new file mode 100644 index 0000000..a8e034d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml @@ -0,0 +1,191 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ruler.annotations | nindent 4 }} +spec: + replicas: {{ .Values.ruler.replicas }} + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.ruler.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.rulerLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ruler.podLabels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ruler.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ruler.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ruler.priorityClassName }} + priorityClassName: {{ .Values.ruler.priorityClassName }} + {{- end }} + {{- if .Values.ruler.securityContext.enabled }} + securityContext: {{- omit .Values.ruler.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ruler.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + {{- if .Values.ruler.sidecar.enabled }} + - name: {{ template "cortex.name" . }}-sc-rules + {{- if .Values.ruler.sidecar.image.sha }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}@sha256:{{ .Values.ruler.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.ruler.sidecar.imagePullPolicy }} + env: + {{- if .Values.ruler.sidecar.watchMethod }} + - name: METHOD + value: {{ .Values.ruler.sidecar.watchMethod }} + {{ end }} + - name: LABEL + value: "{{ .Values.ruler.sidecar.label }}" + {{- if .Values.ruler.sidecar.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.ruler.sidecar.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.ruler.sidecar.folder }}{{- with .Values.ruler.sidecar.defaultFolderName }}/{{ . 
}}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.ruler.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.ruler.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.ruler.sidecar.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.ruler.sidecar.searchNamespace }}" + {{- end }} + {{- if .Values.ruler.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.ruler.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.ruler.sidecar.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.ruler.sidecar.folderAnnotation }}" + {{- end }} + resources: + {{- toYaml .Values.ruler.sidecar.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.sidecar.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{- end }} + - name: rules + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ruler" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configs.enabled }} + - "-ruler.configs.url=http://{{ template "cortex.configsFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}" + {{- end }} + {{- if not .Values.config.ruler.alertmanager_url }} + {{- if .Values.config.ruler.enable_alertmanager_discovery }} + - "-ruler.alertmanager-url=http://_http-metrics._tcp.{{ template "cortex.name" . }}-alertmanager-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}/api/prom/alertmanager/" + {{- else }} + - "-ruler.alertmanager-url=http://{{ template "cortex.alertmanagerFullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}/api/prom/alertmanager/" + {{- end }} + {{- end }} + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ruler.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ruler.extraVolumeMounts }} + {{- toYaml .Values.ruler.extraVolumeMounts | nindent 12}} + {{- end }} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{ end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: /data + subPath: {{ .Values.ruler.persistentVolume.subPath }} + - name: tmp + mountPath: /rules + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + mountPath: /etc/cortex/rules/{{ $dir }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.ruler.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.ruler.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.ruler.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ruler.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ruler.env }} + env: + {{- toYaml .Values.ruler.env | nindent 12 }} + {{- end }} + {{- if .Values.ruler.extraContainers }} + {{- toYaml .Values.ruler.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.ruler.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ruler.affinity | nindent 8 }} + tolerations: + {{- 
toYaml .Values.ruler.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ruler.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: tmp + emptyDir: {} + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + configMap: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + {{- end }} + - name: storage + emptyDir: {} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + emptyDir: {} + {{- end }} + {{- if .Values.ruler.extraVolumes }} + {{- toYaml .Values.ruler.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml new file mode 100644 index 0000000..52fb3e0 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ruler.replicas) 1) (.Values.ruler.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.ruler.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml new file mode 100644 index 0000000..de6744f --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ruler.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- if .Values.ruler.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ruler.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ruler.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ruler.serviceMonitor.interval }} + interval: {{ .Values.ruler.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ruler.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ruler.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ruler.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ruler.serviceMonitor.extraEndpointSpec }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml new file mode 100644 index 0000000..7752ef4 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- with .Values.ruler.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ruler.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.rulerSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml new file mode 100644 index 0000000..2b30599 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml @@ -0,0 +1,18 @@ +{{- with .Values.runtimeconfigmap }} +{{- if .create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" $ }}-runtime-config + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.labels" $ | nindent 4 }} + {{- with .annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +data: + runtime_config.yaml: | + {{- tpl (toYaml .runtime_config) $ | nindent 4 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml new file mode 100644 index 0000000..9194971 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.configsdb_postgresql.enabled .Values.configsdb_postgresql.auth.password -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }}-postgresql + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + postgresql-password: {{ .Values.configsdb_postgresql.auth.password | b64enc}} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret.yaml new file mode 100644 index 0000000..ff0e78f --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if (and (not .Values.useExternalConfig) (not .Values.useConfigMap)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: {{ tpl (toYaml .Values.config) . 
| b64enc }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml new file mode 100644 index 0000000..963f866 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} + annotations: + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl new file mode 100644 index 0000000..3cca867 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl @@ -0,0 +1,23 @@ + +{{/* +store-gateway fullname +*/}} +{{- define "cortex.storeGatewayFullname" -}} +{{ include "cortex.fullname" . }}-store-gateway +{{- end }} + +{{/* +store-gateway common labels +*/}} +{{- define "cortex.storeGatewayLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: store-gateway +{{- end }} + +{{/* +store-gateway selector labels +*/}} +{{- define "cortex.storeGatewaySelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: store-gateway +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml new file mode 100644 index 0000000..1019cc8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.store_gateway.replicas) 1) (.Values.store_gateway.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + {{- toYaml .Values.store_gateway.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml new file mode 100644 index 0000000..39eaeda --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.store_gateway.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . 
| nindent 4 }} + {{- if .Values.store_gateway.serviceMonitor.additionalLabels }} +{{ toYaml .Values.store_gateway.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.store_gateway.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.store_gateway.serviceMonitor.interval }} + interval: {{ .Values.store_gateway.serviceMonitor.interval }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.store_gateway.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.store_gateway.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 0000000..0238c75 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,142 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.store_gateway.annotations | nindent 4 }} +spec: + replicas: {{ .Values.store_gateway.replicas }} + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.store_gateway.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . }}-store-gateway-headless + {{- if .Values.store_gateway.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.store_gateway.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.store_gateway.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.store_gateway.persistentVolume.storageClass }} + {{- if (eq "-" .Values.store_gateway.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.store_gateway.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{- toYaml .Values.store_gateway.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.store_gateway.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.store_gateway.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.store_gateway.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.store_gateway.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.store_gateway.priorityClassName }} + priorityClassName: {{ .Values.store_gateway.priorityClassName }} + {{- end }} + {{- if .Values.store_gateway.securityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.store_gateway.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.store_gateway.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.store_gateway.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.store_gateway.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.store_gateway.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.store_gateway.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.store_gateway.extraVolumes }} + {{- toYaml .Values.store_gateway.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.store_gateway.extraContainers }} + {{ toYaml .Values.store_gateway.extraContainers | nindent 8 }} + {{- end }} + - name: store-gateway + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=store-gateway" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.store_gateway.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.store_gateway.extraVolumeMounts }} + {{- toYaml .Values.store_gateway.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.store_gateway.persistentVolume.subPath }} + subPath: {{ .Values.store_gateway.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.store_gateway.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.store_gateway.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.store_gateway.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.store_gateway.resources | nindent 12 }} + {{- if .Values.store_gateway.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.store_gateway.env }} + env: + {{- toYaml .Values.store_gateway.env | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 0000000..c56ec77 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,24 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: 
Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 0000000..f58019b --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,23 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.storeGatewaySelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml new file mode 100644 index 0000000..fc41461 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.fullname" . }}-memberlist + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + name: gossip + targetPort: gossip + selector: + {{- include "cortex.selectorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl new file mode 100644 index 0000000..4798c6d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl @@ -0,0 +1,23 @@ + +{{/* +table-manager fullname +*/}} +{{- define "cortex.tableManagerFullname" -}} +{{ include "cortex.fullname" . }}-table-manager +{{- end }} + +{{/* +table-manager common labels +*/}} +{{- define "cortex.tableManagerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: table-manager +{{- end }} + +{{/* +table-manager selector labels +*/}} +{{- define "cortex.tableManagerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: table-manager +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml new file mode 100644 index 0000000..d24dcc3 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml @@ -0,0 +1,106 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.table_manager.annotations | nindent 4 }} +spec: + replicas: {{ .Values.table_manager.replicas }} + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.table_manager.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.tableManagerLabels" . | nindent 8 }} + {{- with .Values.table_manager.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.table_manager.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.table_manager.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.table_manager.priorityClassName }} + priorityClassName: {{ .Values.table_manager.priorityClassName }} + {{- end }} + {{- if .Values.table_manager.securityContext.enabled }} + securityContext: {{- omit .Values.table_manager.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.table_manager.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: table-manager + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=table-manager" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.table_manager.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.table_manager.extraVolumeMounts }} + {{- toYaml .Values.table_manager.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.table_manager.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.table_manager.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.table_manager.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.table_manager.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.table_manager.resources | nindent 12 }} + {{- if .Values.table_manager.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.table_manager.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.table_manager.env }} + env: + {{- toYaml .Values.table_manager.env | nindent 12 }} + {{- end }} + {{- 
if .Values.table_manager.extraContainers }} + {{- toYaml .Values.table_manager.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.table_manager.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.table_manager.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.table_manager.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.table_manager.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.table_manager.extraVolumes }} + {{- toYaml .Values.table_manager.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml new file mode 100644 index 0000000..91adabf --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.table_manager.replicas) 1) (.Values.table_manager.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.table_manager.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml new file mode 100644 index 0000000..9748724 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.table_manager.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- if .Values.table_manager.serviceMonitor.additionalLabels }} +{{ toYaml .Values.table_manager.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.table_manager.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.table_manager.serviceMonitor.interval }} + interval: {{ .Values.table_manager.serviceMonitor.interval }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.table_manager.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.table_manager.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.table_manager.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.table_manager.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml new file mode 100644 index 0000000..ff3c57d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml @@ -0,0 +1,23 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + {{- with .Values.table_manager.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.table_manager.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.tableManagerSelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/cortex/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/values.yaml new file mode 100644 index 0000000..4a0f8c8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/cortex/values.yaml @@ -0,0 +1,1605 @@ +image: + #repository: quay.io/cortexproject/cortex + repository: 10.10.31.243:5000/cmoa3/cortex + # -- Allows you to override the cortex version in this chart. Use at your own risk. + #tag: "" + tag: v1.11.0 + pullPolicy: IfNotPresent + + # -- Optionally specify an array of imagePullSecrets. + # Secrets must be manually created in the namespace. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # pullSecrets: [] + pullSecrets: + - regcred + + +# -- Kubernetes cluster DNS domain +clusterDomain: cluster.local + +tags: + # -- Set to true to enable block storage memcached caching + blocks-storage-memcached: false + +ingress: + enabled: false + ingressClass: + enabled: false + name: "nginx" + annotations: {} + hosts: + - host: chart-example.local + paths: + - / + tls: [] + +serviceAccount: + create: true + name: + annotations: {} + automountServiceAccountToken: true + +useConfigMap: false +useExternalConfig: false +externalConfigSecretName: 'secret-with-config.yaml' +externalConfigVersion: '0' + +config: + auth_enabled: false + api: + prometheus_http_prefix: '/prometheus' + # -- Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs + # which can benefit from compression. + response_compression_enabled: true + ingester: + walconfig: + wal_enabled: true + flush_on_shutdown_with_wal_enabled: true + recover_from_wal: true + lifecycler: + # -- We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. 
+ # It can take a while to have the full picture when using gossip + join_after: 10s + + # -- To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, + # after putting their own tokens into it. This is only useful when using gossip, since multiple + # ingesters joining at the same time can have conflicting tokens if they don't see each other yet. + observe_period: 10s + # -- Duration to sleep for before exiting, to ensure metrics are scraped. + final_sleep: 30s + num_tokens: 512 + ring: + # -- Ingester replication factor per default is 3 + replication_factor: 3 + kvstore: + store: "memberlist" + limits: + # -- Enforce that every sample has a metric name + enforce_metric_name: true + reject_old_samples: true + reject_old_samples_max_age: 168h + max_query_lookback: 0s + server: + http_listen_port: 8080 + grpc_listen_port: 9095 + grpc_server_max_recv_msg_size: 10485760 + grpc_server_max_send_msg_size: 10485760 + grpc_server_max_concurrent_streams: 10000 + ingester_client: + grpc_client_config: + max_recv_msg_size: 10485760 + max_send_msg_size: 10485760 + # -- See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config + storage: + engine: blocks + index_queries_cache_config: + memcached: + # -- How long keys stay in the memcache + expiration: 1h + memcached_client: + # -- Maximum time to wait before giving up on memcached requests. + timeout: 1s + blocks_storage: + # custom backend setting related to using s3 + backend: s3 + s3: + bucket_name: cortex-bucket + # -- The S3 bucket endpoint. It could be an AWS S3 endpoint listed at + # https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an + # S3-compatible service in hostname:port format.
+ endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + + tsdb: + dir: /data/tsdb + bucket_store: + sync_dir: /data/tsdb-sync + bucket_index: + enabled: true + # -- https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config + store_gateway: + sharding_enabled: false + distributor: + # -- Distribute samples based on all labels, as opposed to solely by user and + # metric name. + shard_by_all_labels: true + pool: + health_check_ingesters: true + memberlist: + bind_port: 7946 + # -- the service name of the memberlist + # if using memberlist discovery + join_members: + - '{{ include "cortex.fullname" $ }}-memberlist' + querier: + active_query_tracker_dir: /data/active-query-tracker + # -- Maximum lookback beyond which queries are not sent to ingester. 0 means all + # queries are sent to ingester. Ingesters by default have no data older than 12 hours, + # for which upstream guidance suggests 13h; this deployment overrides it to 9h + query_ingesters_within: 9h + # -- The time after which a metric should be queried from storage and not just + # ingesters. + query_store_after: 7h + # -- Comma separated list of store-gateway addresses in DNS Service Discovery + # format. This option is set automatically when using the blocks storage and the + # store-gateway sharding is disabled (when enabled, the store-gateway instances + # form a ring and addresses are picked from the ring). + # @default -- automatic + store_gateway_addresses: |- + {{ if and (eq .Values.config.storage.engine "blocks") (not .Values.config.store_gateway.sharding_enabled) -}} + dns+{{ include "cortex.storeGatewayFullname" $ }}-headless:9095 + {{- end }} + query_range: + split_queries_by_interval: 24h + align_queries_with_step: true + cache_results: true + results_cache: + cache: + memcached: + expiration: 1h + memcached_client: + timeout: 1s + ruler: + enable_alertmanager_discovery: false + # -- Enable the experimental ruler config api.
+ alertmanager_url: 'http://alertmanager.imxc/alertmanager' + enable_api: true + # -- Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config + storage: {} + runtime_config: + file: /etc/cortex-runtime-config/runtime_config.yaml + alertmanager: + # -- Enable the experimental alertmanager config api. + enable_api: true + external_url: 'http://alertmanager.imxc/alertmanager' + #external_url: '/api/prom/alertmanager' + # -- Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config + storage: {} + frontend: + log_queries_longer_than: 10s + # S3 사용 관련 커스텀 설정 + alertmanager_storage: + s3: + bucket_name: cortex-alertmanager + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + ruler_storage: + s3: + bucket_name: cortex-ruler + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + +runtimeconfigmap: + # -- If true, a configmap for the `runtime_config` will be created. + # If false, the configmap _must_ exist already on the cluster or pods will fail to create. + create: true + annotations: {} + # -- https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file + # 설정부 + runtime_config: {} +alertmanager: + enabled: true + replicas: 1 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful for using a persistent volume for storing silences between restarts. 
+ enabled: false + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log level (debug, info, warn, error) + extraArgs: {} + # -experimental.alertmanager.enable-api: "true" + # -alertmanager.web.external-url: /alertmanager + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + + persistentVolume: + # -- If true and alertmanager.statefulSet.enabled is true, + # Alertmanager will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Alertmanager data Persistent Volume Claim annotations + annotations: {} + + # -- Alertmanager data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Alertmanager data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Alertmanager data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Alertmanager data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + # -- Tolerations for pod assignment + # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # -- If not set then a PodDisruptionBudget will not be created + podDisruptionBudget: + maxUnavailable: 1 + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 60 + + # -- Init containers to be added to the cortex pod. + initContainers: [] + + # -- Additional containers to be added to the cortex pod. + extraContainers: [] + + # -- Additional volumes to the cortex pod. + extraVolumes: [] + + # -- Extra volume mounts that will be added to the cortex container + extraVolumeMounts: [] + + # -- Additional ports to the cortex services. Useful to expose extra container ports. 
+ extraPorts: [] + + # -- Extra env variables to pass to the cortex container + env: [] + + # -- Sidecars that collect the configmaps with specified label and store the included files into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # -- skipTlsVerify Set to true to skip tls verification for kube api calls + skipTlsVerify: false + enableUniqueFilenames: false + enabled: false + label: cortex_alertmanager + watchMethod: null + labelValue: null + folder: /data + defaultFolderName: null + searchNamespace: null + folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +distributor: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: + -validation.max-label-names-per-series: "45" + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - distributor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the distributor pods.
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 60 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +ingester: + replicas: 3 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful when using WAL + enabled: true + # -- ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details + podManagementPolicy: OrderedReady + + service: + annotations: {} + labels: {} + + serviceAccount: + name: + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - ingester + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 30 + targetMemoryUtilizationPercentage: 80 + behavior: + scaleDown: + # -- see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details + policies: + - type: Pods + value: 1 + # set to no less than 2x the maximum between -blocks-storage.bucket-store.sync-interval and -compactor.cleanup-interval + periodSeconds: 1800 + # -- uses metrics from the past 1h to make scaleDown decisions + stabilizationWindowSeconds: 3600 + scaleUp: + # -- This default scaleup policy allows adding 1 pod every 30 minutes. + # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + policies: + - type: Pods + value: 1 + periodSeconds: 1800 + + lifecycle: + # -- The /shutdown preStop hook is recommended as part of the ingester + # scaledown process, but can be removed to optimize rolling restarts in + # instances that will never be scaled down or when using chunks storage + # with WAL disabled. 
+ # https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down + preStop: + httpGet: + path: "/ingester/shutdown" + port: http-metrics + + persistentVolume: + # -- If true and ingester.statefulSet.enabled is true, + # Ingester will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: true + + # -- Ingester data Persistent Volume Claim annotations + annotations: {} + + # -- Ingester data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Ingester data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Ingester data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Ingester data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: exem-local-storage + + # -- Startup/liveness probes for ingesters are not recommended. + # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + startupProbe: {} + + # -- Startup/liveness probes for ingesters are not recommended. 
+ # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + livenessProbe: {} + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +ruler: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + #-ruler.configs.url: http://cortex-configs:8080 + #-ruler.alertmanager-url: http://cortex-alertmanager:8080 + -ruler.storage.type: configdb + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + # -- allow configuring rules via configmap. 
ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html + directories: {} + + # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 50m + # memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + enabled: false + # -- label that the configmaps with rules are marked with + label: cortex_rules + watchMethod: null + # -- value of label that the configmaps with rules are set to + labelValue: null + # -- folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) + folder: /tmp/rules + # -- The default folder name, it will create a subfolder under the `folder` and put rules in there instead + defaultFolderName: null + # -- If specified, the sidecar will search for rules config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # -- If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. 
+ folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +querier: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - querier + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the querier pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +query_frontend: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - query-frontend + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +table_manager: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +configs: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + # -configs.database.migrations-dir: /migrations + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +nginx: + enabled: true + replicas: 2 + http_listen_port: 80 + config: + dnsResolver: coredns.kube-system.svc.cluster.local + # -- ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size + client_max_body_size: 20M + # -- arbitrary snippet to inject in the http { } section of the nginx config + httpSnippet: "" + # -- arbitrary snippet to inject in the top section of the nginx config + mainSnippet: "" + # -- arbitrary snippet to inject in the server { } section of the nginx config + serverSnippet: "" + setHeaders: {} + # -- (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config + auth_orgs: [] + # -- (optional) Name of basic auth secret. + # In order to use this option, a secret with htpasswd formatted contents at + # the key ".htpasswd" must exist. 
For example: + # + # apiVersion: v1 + # kind: Secret + # metadata: + # name: my-secret + # namespace: + # stringData: + # .htpasswd: | + # user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 + # user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ + # + # Please note that the use of basic auth will not identify organizations + # the way X-Scope-OrgID does. Thus, the use of basic auth alone will not + # prevent one tenant from viewing the metrics of another. To ensure tenants + # are scoped appropriately, explicitly set the `X-Scope-OrgID` header + # in the nginx config. Example + # setHeaders: + # X-Scope-OrgID: $remote_user + basicAuthSecretName: "" + + image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: 1.21 + pullPolicy: IfNotPresent + + service: + type: ClusterIP + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: {} + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /healthz + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /healthz + port: http-metrics + readinessProbe: + httpGet: + path: /healthz + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: false + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 10 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the nginx pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + +store_gateway: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - store-gateway + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true Store-gateway will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Store-gateway data Persistent Volume Claim annotations + annotations: {} + + # -- Store-gateway data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Store-gateway data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Store-gateway data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Store-gateway data 
Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +compactor: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - compactor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true compactor will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- compactor data Persistent Volume Claim annotations + annotations: {} + + # -- compactor data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # compactor data Persistent Volume size + size: 2Gi + + # -- Subdirectory of compactor data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- compactor data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +# -- chunk caching for legacy chunk storage engine +memcached: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index read caching for legacy chunk storage engine +memcached-index-read: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index write caching for legacy chunk storage engine +memcached-index-write: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-frontend: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-index: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-metadata: + # enabled/disabled via the tags.blocks-storage-memcached boolean + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +configsdb_postgresql: + enabled: true + uri: postgres://admin@postgres/configs?sslmode=disable + auth: + password: eorbahrhkswp + existing_secret: + name: + key: diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/.helmignore new file mode 100644 index 0000000..e12c0b4 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/Chart.yaml new file mode 100644 index 0000000..be38643 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: 7.6.0 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +icon: https://helm.elastic.co/icons/elasticsearch.png +maintainers: +- email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +sources: +- https://github.com/elastic/elasticsearch +version: 7.6.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml new file mode 100644 index 0000000..2631417 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch-headless + labels: + app: elasticsearch +spec: + clusterIP: None + selector: + app: elasticsearch + ports: + - name: transport + port: 9300 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml 
b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml new file mode 100644 index 0000000..505cc5a --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch + labels: + app: elasticsearch +spec: + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 +# nodePort: 30200 +# type: NodePort + type: ClusterIP diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml new file mode 100644 index 0000000..ee0a42d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: imxc + name: elasticsearch-config + labels: + app: elasticsearch +data: +# discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch", "elasticsearch-2.elasticsearch"] +# cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1", "elasticsearch-2"] +# ES_JAVA_OPTS: -Xms8g -Xmx8g + elasticsearch.yml: | + cluster.name: imxc-elasticsearch-cluster + network.host: ${POD_NAME} + discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch"] + cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1"] + xpack.ml.enabled: false + xpack.security.enabled: true + xpack.security.transport.ssl.enabled: true + xpack.security.transport.ssl.verification_mode: certificate + xpack.security.transport.ssl.client_authentication: required + xpack.security.transport.ssl.keystore.path: elastic-certificates.p12 + xpack.security.transport.ssl.truststore.path: elastic-certificates.p12 + xpack.security.transport.filter.enabled: true + xpack.security.transport.filter.allow: _all + 
xpack.security.http.ssl.enabled: true + xpack.security.http.ssl.keystore.path: http.p12 + node.ml: false + cluster.routing.rebalance.enable: "all" + cluster.routing.allocation.allow_rebalance: "indices_all_active" + cluster.routing.allocation.cluster_concurrent_rebalance: 2 + cluster.routing.allocation.balance.shard: 0.3 + cluster.routing.allocation.balance.index: 0.7 + cluster.routing.allocation.balance.threshold: 1 + cluster.routing.allocation.disk.threshold_enabled: true + cluster.routing.allocation.disk.watermark.low: "85%" + cluster.routing.allocation.disk.watermark.high: "90%" + cluster.routing.allocation.disk.watermark.flood_stage: "95%" + thread_pool.write.queue_size: 1000 + thread_pool.write.size: 2 + ES_JAVA_OPTS: -Xms8g -Xmx8g diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml new file mode 100644 index 0000000..5a53f57 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-0 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-1 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + 
nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-elasticsearch-cluster-2 +# labels: +# type: local +# app: elasticsearch +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.ELASTICSEARCH_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: elasticsearch-storage +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: +# - {{ .Values.global.ELASTICSEARCH_HOST3 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml new file mode 100644 index 0000000..a4ae2db --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml @@ -0,0 +1,53 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +#kind: PersistentVolumeClaim +#apiVersion: v1 +#metadata: +# namespace: imxc +# name: elasticsearch-data-elasticsearch-2 +#spec: +# accessModes: +# - ReadWriteOnce +# volumeMode: Filesystem +# resources: +# requests: +# storage: 30Gi +# storageClassName: elasticsearch-storage +# 
selector: +# matchLabels: +# type: local +# app: elasticsearch \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml new file mode 100644 index 0000000..2cbd4b8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml @@ -0,0 +1,146 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: apps/v1beta1 +{{- end }} +kind: StatefulSet +metadata: + namespace: imxc + name: elasticsearch +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: elasticsearch +{{- end }} + serviceName: elasticsearch + replicas: 2 #3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: elasticsearch + spec: + securityContext: + fsGroup: 1000 + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - elasticsearch + topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: elastic-node + operator: In + values: + - "true" + initContainers: + - name: init-sysctl + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + #command: ["sysctl", "-w", "vm.max_map_count=262144"] + command: ["/bin/sh", "-c"] + args: ["sysctl -w vm.max_map_count=262144; chown -R 1000:1000 /usr/share/elasticsearch/data"] + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + containers: + - name: elasticsearch + resources: + requests: + cpu: 1000m + memory: 16000Mi #32000Mi + limits: + cpu: 2000m + memory: 16000Mi #32000Mi + 
securityContext: + privileged: true + runAsUser: 1000 + capabilities: + add: + - IPC_LOCK + - SYS_RESOURCE + image: {{ .Values.global.IMXC_IN_REGISTRY }}/elasticsearch:{{ .Values.global.ELASTICSEARCH_VERSION }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ES_JAVA_OPTS + valueFrom: + configMapKeyRef: + name: elasticsearch-config + key: ES_JAVA_OPTS + # log4j patch + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: ELASTIC_USERNAME + value: {{ .Values.global.CMOA_ES_ID }} + - name: ELASTIC_PASSWORD + value: {{ .Values.global.CMOA_ES_PW }} + readinessProbe: + httpGet: + scheme: HTTPS + path: /_cluster/health?local=true + port: 9200 + httpHeaders: + - name: Authorization + # encode base64 by elastic:elastic + value: Basic ZWxhc3RpYzplbGFzdGlj + initialDelaySeconds: 5 + ports: + - containerPort: 9200 + name: es-http + - containerPort: 9300 + name: es-transport + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + - name: elasticsearch-config + mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + subPath: elasticsearch.yml + - name: es-cert-certificate + mountPath: /usr/share/elasticsearch/config/elastic-certificates.p12 + subPath: elastic-certificates.p12 + - name: es-cert-ca + mountPath: /usr/share/elasticsearch/config/elastic-stack-ca.p12 + subPath: elastic-stack-ca.p12 + - name: es-cert-http + mountPath: /usr/share/elasticsearch/config/http.p12 + subPath: http.p12 + volumes: + - name: elasticsearch-config + configMap: + name: elasticsearch-config + items: + - key: elasticsearch.yml + path: elasticsearch.yml + - name: es-cert-certificate + secret: + secretName: es-cert + - name: es-cert-ca + secret: + secretName: es-cert + - name: es-cert-http + secret: + secretName: es-cert + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: elasticsearch-storage + resources: + requests: + storage: 10Gi diff 
--git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml new file mode 100644 index 0000000..2a24b92 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + elastic-certificates.p12: MIINbwIBAzCCDSgGCSqGSIb3DQEHAaCCDRkEgg0VMIINETCCBW0GCSqGSIb3DQEHAaCCBV4EggVaMIIFVjCCBVIGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRrCrEWs79GCUPrYkFrkDBEF9uz4gIDAMNQBIIEyJUjcP339Anee6bdJls469HbsqYGgzidG41xto7ignNdZdP9LTTca+w8sN8tbVnTUZi4kQYcPSQqv+cWobi66KpgvQ7HhA/YE9K5L7wR7KEj0o61LYvucHm19hRdt788EvBy4mi8cDAr3m49NNuLUM6wyeCEKr2W2dwZFIyxFTPVv6/ef6cuHyDNLXJtjUmOIzNDL8Olqk8JGAd9bwXlizcShfmbiHHX8pAhK0u9JThFQePvCGiKA4LwzeuuwuEniznMlUQ4T/TjLjLLYcoS4vktfOJKPOgL3esjsc5hPoVgbw+ZpNCxRq1RVs/5eOBkxzXhJ7hdNELJDcMjitBfl71MlSDtMV4FhlVuhjilsuHx6URucsEE2l1V3asg4QP1PoSiACqncr2WhCcrKu0d8DztlIkCYG7D8oiAx4nEzsm0xmOhIcigHw6GP4MNeCieJCgAwLkJf1m73IYcxyaKsJAc57jfs9ue62KkVHL2NxNRjTps2j0Cl5NJQRE4CTkieU0etsNS1nJEwiJunVTyHXAa53MF6j40awEqs2Ko4gQENPpuQc599yJb+ZTHfHPe8bpfrmnxiEAaeiABu+OVH9bdLK5gtCyD5vXGZKVtHbyR+0+UlBggw/horFQIP+x7SKO53+ho0iCnYyQK52kJiv93JNgStGHpxf1SkPTtWHOraR2qSZTX6F7vjBtIq3Y6ocb6yo/jMNhzk3spHdz+F99S6uV3NLmDfX2vJmu1YSaPwaNZGDggcFI/g2S5ylBWyHpk2rB5gtklUIQEWxFFvbFOp37ffcdC0mZ6SgpOxj+IxuVLqTvyDLjrfteEvfjRAFXsT8E4XikC8QKjQ+KAwDYETidOiYB0/ByCh7t1KbcKJWU8XYxqzukX88CyVtO9Lp/f97x3ycvaF1UfzLBrm/bnTa0jPEP2/OdzpbjQJcEGX64+QY92k38zjPe4tedUz5H/C9aw8Q8r/DSxUhn2sdDXssR9jytITLLOJHDJX7XCfZxtoW60bwRm5MyXc4bJmjZT2BgxTWIVokaOhk0IZwpbC/oxh1QkaHBioP6+slASXg8Xu9l+mACevb1b9RvpN+fhurW2wOHl4Kul775BCohuTtiqKAce8KEACwncwYz+ZfcPTkbLRy6+p6NI3zNWpZE+iFlPtLh+2+T/QQHEfKTNUxcXLt8WCMOZuCe776T41nY8UhbUQJKqlEvom3MzCcsvFBoahlpjv+rg9/Ay7ESMil49e2x3qbD2929X0BHz//RcvPO5fvSEK/tC2uHzWzqHf0ZaRwtO19Z95Uv3GjGNF0SO8qri830LfJ+ctjk320qLyZmxA9QgPoI2oMHSxkaX1fgVeiN9coBM8yJbPK8ZdOOg4abnYOhqrTJXaoSFo+SYyAVZoTiQIIk/JScL5Qcw9IJ
w6sSKmOdChy2spYQKeo1NU9ecLD8YRBqRP0EET7e7NDPKlIWQ1vB5y2hokyL7bxvbGgzqQBAyo9wKJ3v1g4IYEWA9mluvQapOMVEHBYh6wv2nTJpE9EqMxpYQBU1w+vgX0EUgZDEOBkbvd5wubAeERt0mJqjea6vxWJIbeqMVIIoJSZEDaPE5qVNYaosoc8yvAZ9+U3lZlZObHzHEAIUx/2pP/jFEMB8GCSqGSIb3DQEJFDESHhAAaQBuAHMAdABhAG4AYwBlMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE4MTk0NzgwggecBgkqhkiG9w0BBwagggeNMIIHiQIBADCCB4IGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFP43u2ii0k7JTUfInMhUBwjWZrS/AgMAw1CAggdItHB4SBc5KdDVc8eXuF8Ex1WP/Y2wz76PoNNpYm2LeIVozsp5c/2RDN2KqhcvhTihlY44esqWWVCOx+OTwmAPFwzZSrMaOYpcOP3fRWaHJLw98cK8a1ZuNv3eXWecf333TrsvU/bpT3v0KNO915qnSbtNwlvXaOMm6jbw6eBnkB7i6jxA7kgVAW6soa3ZHOrV78quBSbAjXZddHsI8x3MS4rxdvkp6GHet22/fQxjxz8UlQEDqzQgK7F4RqULRJeU//JID7VJqfbHRHfnYsKszsirhWKeJsxLVhG1VU/zRgxs0C35NfQeR/o7jmFpE7CCvvC0Rea2pybNojb51HLvyycXtpGn0gAdTBVNnwK1X58uSDWH7jM61uX9f+/gcDZqlUj6UVc6mzqxAgzDtf6B32G0VQq2szaJjbRVEVXhCAOIdVj6pRpI3l3gRv8OkNAWsGwYDMjeFxnrEpw1AQkEj7FRgI6iNOxEfUhOVYIEsflGTUdcd+K+zlCCHAJoMzbqiwPyHHgvLOp04A7fog+H3/cn6Tdmrp/J7TxpaW1ZwwcHtTRLoq0F77Sj8XJule3CzaDtg6IBen/Yo7H9hhK3ORodlGjJYA285dHAd1mtqmHmoWeDNoVrlVyymge78yXGmlFsBWF83VUChRx+9noF3Zhz+QMPBNsKHk4TM9yRHiWpMZIdkEZKq+obCPU2PmC21wnWx13nhb88gaNyBjHxFsGE91SgEyQh/cPhi01Y7+yNYQvYOXJe3EQ6oqFCBkPUnrbAMiHDP//AVN/tUrgVbmpIclfFprP2YIRcfGa7qch48RFbmhnX5N/OYLaPnNYdbxOiwZ0f/KIpDKWS67kS2N+jDKWs/SCLs2g89q1z2EGvbVwKMD6Vl559EZxAfNRv+eZu0MvTejEkuykIHJpXCyP+8EphUyWW9Cqll1ux4rXMUDkgl5sh1WgSoIEASX2j5TJ3fIh0nBkjAkBi0n2BINZgVWKj9U1zHNdRF67Eb+97lUuY6JIkbFhLSgZiIZqnI9bnW8OKUJFtvVtlSKG4xqdOeAroB8GLw2iR/GjF2Dvy4rIZo+qeTCIN+bm+iFkCri7L2K0/KR25h7bAtXwBxwMct5F4A1vltlLs408efMRJ7dg3iqMGhRyXdwxKexWJLbp02uJQVU9/ogYeLfSiIZEm25qjEMQZqRpQpwLaH5JB9oLKqdLEdeuxOfqb6weHDOtITlFHToeRNzIEmbiT9gbdpMwKTxs/rtwMHgGU6kIJmIFgnw2gauKvpiIuDCY79JpSNipsicvvLTIa4cc8sZCCllZ1wAmbNDsCH6p0bh8CooMjGf2vUbRClSe9+R19/lRMFGSp4N6fElW7MxNw85xpkFjG0s053fvIJmfPhxVqUHMP3fFQv0DUvvQNvNTsRGdDjohkC0095v9EWy7n9Frv2wIM2G7uVHvrlgkQfPK2JsYZKsUE0KXa4HUQptWL71kp7RQSmOmXFzsthjYVXu/pfXA+u+PAtHvQpo1nTPreXn3UZqiEiQmNkmMPLAYzpIi35tjNewfw5XwDj77pqH5OFcMZDTKbiIn
V1LuvFlKxCEYh4gvTThC0XTsrsiHgldtNcw9ZB017uPW9AAqbj2IB0d5b0ZB3yMZ67uzt1pretcxmEfSoA64QWOC9lBYp4DVE9QxcCnsSgibWreqpdJHmX5MR4umwIb6WaM1pJdCY1bW4tO3ZVT4DA/4ry7jqxUH4AcZRNK0zYR6DAtZndB7LTJhT+8d5EBtmAHzC5HT9KLmHV6mAG1QLMlwhNXmtM0YCJsKxcZo+xLBy/2cHl41EU4ACiuEq1JrM5j9fQk+hmJHT+JB0aqv+kvdxGmgBuVWGHQBtNTV6TYeLzqzDpIl9uXi3qFKFBuTQOska2zAMv7gLOe79w1cVb/SJKdcYjWtLR0v6wfaRgVeBwLvTvh7nNXhXRqKfQKe3e2Tjgq4nV4kOQHI21WDKGSd4ONyyvXGMwNzRgcZwpDFAcvshZATwaBtAo4JWi6D3vJB6H1PHRtyqHjErKkPazoZMjR2sZI8S4BMo4R5fa1ZztZO4p2lJYUIAQHj872UdGXHTXgyZKU8t/ifiVfxon5UtZJRi0Xq5OMdN//Qtq2kVwQxntf0eWsygkKMtNr1XLzu0TAMUMItnohdQWUw5w8UeXYOAYfZFqZEhKfcwkJsfq1q56ptzVBI3T2hDFM7xuVFNn5y+FCTx9pB9FCbln/3ZlKuUiTH/eLMKdQYGkRX4X0qzkx3YqAn6jDLQPEG3Rz0JP53T43uLxGpqa8+jn1XIUCNj50mqZGiah7bdo1qsDHbFWYCe7uoOjPapontpaoEQaZog1INqBNerS19a+i4S0/uAsGApykwUhk/zGfr9UudpKJWd7AznlF3+yfZfk/9mCSajBpoWafCIWmOvxJD77L86YAs9STuhWUGQvL2rxPf2uyS4WAi2+DgbdrGTSiwNB/1YX8iHp/cw6DA+MCEwCQYFKw4DAhoFAAQUSvLiFrAQlmfgL3Cewez5Fw2+0okEFH+RyXvcJHVaYbaqjejrXkgUS0JsAgMBhqA= + elastic-stack-ca.p12: 
MIIJ2wIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCBWEGCSqGSIb3DQEHAaCCBVIEggVOMIIFSjCCBUYGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBTQSr5nf5M77CSAHwj38PF//hiFVgIDAMNQBIIEyBrOipz1FxDRF9VG/4bMmue7Dt+Qm37ySQ/ZfV3hFTg6xwjEcHje6hvhzQtFeWppCvd4+7U/MG8G5xL0vfV5GzX1RhVlpgYRfClqMZo3URqBNu6Y5t3sum+X37zbXQ1GI6wo3YURStZkDHlVtObZB667qqj5rO4fIajzRalaxTFda8aS2xAmQklMcCEXASsO5j0+ufVKiOiG2SIEV2LjjYlUymP7d9+LAZ2I6vR+k/jo2oNoPeq0v68qFd9aOB2ojI9Q/PDFA7Nj1kKMK7KjpxGN5/Ocfr8qrxF1mviA6rPdl8GV3WCFMFKcJER4fRmskWGNE/AdwU3laXvJux/qz4rjiYoJX+5rSyXBDxdznaFiSyN1LYkFJ+nao6HSAmPPyfEPVPRICc6XHMUM4BZOVlJO49M1xg7NFQUtkyVm8+ooDwXCiGEUHDZNw+hCcuUewp0ZXki695D0tESnzi3BE56w7CRySeaNR8psAtL74IUtov9I66GlBEI7HSbyLTT9Fa7+o+ElJWnFqIyW8WzNF3T5fvRv2LfKjYO5KiISlOM03KlETWE1F60TZqW3EbP9WjLhRnovFcJVsNyha+wDVTu44DAylMX4Oh2xKYm2YW+Oi0aeCFmJbDp/TlxYhm5ACYUxma6CVxbEgHkxwjWyFfiNQp2MBL/5HFJGxuny2lVnN8yUSCvDdnOlVTB36/EByY/oA8S+GF/QRYd3PMew56s7aBgPt8mhncN5Cdm+GCD/Nb/ibcuTId9HAaT6o3wMsc7bYusjHGCjFbz9fEdU2MdpLJO+FXVM9E1sEKoTpPLeJDh2a9RUWJQPUCLu8MgEdiJohtEpOtvM7y5+XbuAkYaDsBw3ym5M/kwovN09X1m5x5qM0QSRIVKHf1qo6wo68VMeVQDEBNxJ5/tuZ11qE3siGRfwDnUkCpb9H54+w3zaScPHGAdwplYYwaqnFMwi8nFMtjZvGOLT2wqPLPnKVeQGt4TCVWPXuB4kYnmbTWoJbUT5Wpurcnyn8l6uzLmypCD4k8YiQoDb1b9HIFUAypn580KIUF19eCSGeIHl4hbmusuISxQ1qXk7Ijbj7PiVtMKy5h8rG/c57KJvfvnMQy9hauM5kcZmlTUvrHDw+7cUFB96/wXbvqmcPKGKutgXRqHcTYyBOPEJnSUMBIM2r59wgFjlMuQLrJurzwzox/IEKu/KMilIBDp4k+MHz6NrINWfbV7xa6yAja1kWyvUmwYjCHhlXZmhCb2fmhP1lsnN4BNAkDsdfxHBRCBISy6fuHSY+c4RsokxZ4RomHhVvJsEY/AE4DCvVXDunY8t4ARrQCqXYso3+kVjm6+aelKk+KgyLZ3St0eAIl/Y2xqEXgh0wHGrx3CLZqGqq864f5MmrxiytmlSzHP4RSad20drsN3VchaJZkyrGbKEs6ZJDU2dq5NiC5unqx5tLw6XNRTydIC2PaiVl9m3GLUCh6hQSRJnvcXrqOd8a9K1uV5OoA3TRdc2V5lyxWRIJsdK5KfiAiTsNeM+Tt+Dh2pZjt2l2h4n4BjgYApxG8u10BP1iZ1e1OsCRgLGbgiuXtXrlrjwvJzrB5i11oy9mt3vqgtbjAciQpsQYGGfnVqyGXfEc55hIYWClNAFZDE4MBMGCSqGSIb3DQEJFDEGHgQAYwBhMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE3OTU1MTUwggQUBgkqhkiG9w0BBwagggQFMIIEAQIBADCCA/oGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFEVjuzIvhFF9BzWGr3Ee4cw/mLcqAgMAw1CA
ggPAwroH+zLRt2Jtb8IWeOaIbXAv4sVGUljreWkJE8dkoXNcEQpATEt5H7L4uwnDsevLi1yfWtUDN1OxM8gb7iR4Jysrd+8uM1r0nn9YStz/I3qhN9Fb6yAb+ENTCzwo/oAnyDBM/lXR9fL0EPHRfsDmK+6kC+hZ4AZIao+1oWRD0Bu970yK6gwv7TIRCsS/RBZfC/d4Slz1+IQChiWS4ttTzxK/IuhaFbia0JYtUpjmMGMBQwYRyvITgYpOIct39Il/mabQ4BA1/wk7Oecfe3RHzIfM49AxJtwKppfVfaRJjtK1aoO/GKS6CZuvIIX8q3Mt32OEaoRN9FJM9EkUkKCcYhtRfq0/8MTO97MbrcKeO8XICn8vZwOMM7k7IFtCq44/3QBXa9fpc2BFMVYOoQ22W2ZuMNMRp6OYc6Da1BG4Ik9mt1T4k9NkvfrhpNceR27v6Q0pZNUTN26aPr11/SfS/IZmLGXF7cGAfxITMOQwK2ig6qivXzvwLxfnyW4aHF7K/jL59kDg9Vf9zKmlvPJpHSEWv53U9SFYvvrMISd6E8np0bHRM5p49mgH/KXGauRRaLWUxlBwrhjeZRimTF9x//a0luGf5tIW8ymi32wn8LNiu7fbnkldnivfgWVmktNrPMH+70HNlCWkfaNibSHpzyDQRTzg9PjHEcFH+pQAXCc+A8y8FSvlT+nx9dpXXRK5pqbrGnWyrm5D3oY1ceO0E85R9Fx4Ss0f+mMBtNDYpz7zS5BSX36MNn0gm6MkhlOVbbcAob4WbZAEM7zaiV1ilLegXPZYPCGQydN02Q+lJ7HHZ18T4mzTrjF6M1PFIx31cR1r0ZtJhkCrOWdlTrmovvYYEgEStsiE3pi6dW4v1NgcJVevpnJJ//vpGXasH9Ue/ZNdk1tj/h7cQ/qbKlmvrcuH/UQ969RsNX+K3B1xeYnfbV88BXqFLuqhuWy38wwvBvKO37vq+ioPNIjwaIyCVzoF9/MAx2aNOdk/x04mSNVYh5q0ZKv+3JC3W2vJxV2aonc/ybFgi2GZz2erVYNZTSXz+bEefx8QWzcW6/zr437jh/peQRyQ92PsN+eZV9GB2lrwmF7K2579vNQoVcpzTvTFf+eZZhF8u/1HZW4uFHRUyqE3rHyOukSFukD7XWnFL1yUcWw/SGNIm1HNZD3nXjqcwdAIXl7OvqdO0z/Qt2bny6KpOSJqjMUjB5AX5/yt2xlZBDhlsoGtRfbSWefGf7qTdpg2T9+ClMb7vS1dLzrGRzNgGc7KO2IQdkNcfj+1MD4wITAJBgUrDgMCGgUABBSoZ3hv7XnZag72Gq3IDQUfHtup5gQUHZH4AQTUUCeOS0WnPOdFYNvm1KUCAwGGoA== + http.p12: 
MIINZwIBAzCCDSAGCSqGSIb3DQEHAaCCDREEgg0NMIINCTCCBWUGCSqGSIb3DQEHAaCCBVYEggVSMIIFTjCCBUoGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRl7KAO2Y5ZolA3Si0i+pNdXpn42AIDAMNQBIIEyE9fBFRMMy358/KJQcAD9Ts0Xs0TR0UEl/an+IaNTz/9doU6Es6P22roJUK8j4l09I8ptGGKYdeGzrVBzWEjPhGAZ3EXZPHi2Sr/QKbaiWUnYvqqbPVoWNLukrPvK5NpEyPO2ulfxXN46wHzQMnk5l+BjR4wzqKquxgSzacXRJCqznVj59shjLoTK9FtJ3KVEl+JfukcAh/3EqkP7PRAXrPeQ5UcvYbYMZgxw8xHYg/sdKqyHBxwQqNtvGlfGHQ6jyb4/CS2vu0ZehGHQoMgmry2pvNMjA9ypSVWRGspcrdcQOJNgYtHmBiBScoURLB+9KJX2ivY8zJFI5e8Hb48sLASkp4HQemBWMQTukSnlgddsAtIKgpoRZWpcJ7PunHuWXAKZPCMH6uF14G71/lhluRjjy5GEnkKhKkKnlX15kmLmylTZJVdMbMRnsGK7exsVS8ot7sYJ9EMIvKJUqKf/RmZvUxZqlGp1oy3Uo5JgBU5MF61wnkad+L1UJsB2ZzPV0S/jYKPFVzBsWXj9IH74D02TcQz774+FQqAXlVLlpglmlnMwOU3IboKOH2Z4LIj7Kx7wfZZMi3/sQbYJM2PWCd8OS/keDf53ZwMKNxWPh1ZB7kX4mqhmMHdNgRblcWXP3LtWKck31Vq1UdGfK4/T/nudD1ve15NPUP1DvcVsDOWnRF4s3IDXZwXWqvag+hz0zVyB/T0X1XkqrPtBNX/o5qeTDP30W2GVdGL6SIlgZHaqqNuamHlhGra43ExKTwRPBsskTrziC2fb/JeqXxJBES/YufiomXw14BnQUpyBfVeV3cDDEZUnfu7lJz19jS+2aTtA6v9Qnps+q0rNnLa54JLf9bWlw4RomSWcJCqkkW/EG0AdTKrqNFYPZVZTLvt+4B8ehWrUWas8MK5jAXeTklr0ao5acGOKWip1wmqIRKRAIT2OBbs9jCmigb2xJNDK4RdUtDYsJeltJ69DvnG7bmTLjfsOQcVIaI40k91N8nnda9+/6BdKFDQtMDB6efGkciWp9ce24uGUzKszD7CmKTlCJiqn/V2bbOKGdk4Tafy4B2HzeaX+fMFjpWu01UMaJJrvYbAnXww1Yg2IjbwdAMTv7z8zPIJ0a+drouylUfvKKeun6BnLe0fR+XbRRs77Rengb30c1plozEFHZjzmQ10uVQSh1wWURJnVSru6b1pyVI+KR3WZHB4vgDx+BDlQjxCk53+Hxm5wv8SgpvNxVkepPVF8ucut9FkGNHov1gyatlEKSzYlrFt0mFQWg20rKMrkB6pEDO8f5W2InR3znO15NTbw/l3BXYGOe1lS0tHljc5zJkmMTdVrJnFEd2RqNPNmFWEn+1bm4NeAr6QEY9fiyBCMWBHEELTfHtu4iS37D1cBEKudpCszaWJiPgEeDu75+IuXa/guZdxWJj/ktDfZQJpp9ork2QScgu31l7QdGfC24C2E6kQp4UHZ3k7wXSTUt61bdmK7BHqjiz3HuP76phzd7nZxwLCpEg8fhtwhNgPx3IrU1B4JX40Wzsy1Tz/8oIcvjykDmI967chWtw/WSschamGBelNt+TV1gVKoLlMpL9QxFcAqXhEC6Nr9nXRZRJAIRun3Vj+EabZoR2YsdghDE9boTE8MBcGCSqGSIb3DQEJFDEKHggAaAB0AHQAcDAhBgkqhkiG9w0BCRUxFAQSVGltZSAxNjUzOTcyMDczODY4MIIHnAYJKoZIhvcNAQcGoIIHjTCCB4kCAQAwggeCBgkqhkiG9w0BBwEwKQYKKoZIhvcNAQwBBjAbBBRmhTM5a6OsdDd4LLR/07U/28/dqgID
AMNQgIIHSCCLUDdxl9rcX65CAYiQD1mrnoDJe+c8hWww8KI+RD1/3U8skUZ+NHjf2cjCrDQdtVZcycc37lkJ4HEU0keMdVE7I9tja81EfQclnZAUgx/zzLQqVV9qc1AcKX0pzUczLewoQZdXQHdpXh0u8Hf4xFeYM3EAGxB0mUYGwZXWSxYSdaHmxTgeftqNHF6tudt0vpPgq9Rbqp7zP8z48VUOSUkbNTXZOgNVpMgs/yKivvURdWBwJMkpOs/daeR+QbOLkhrhTtT8FjwFUlpnQ//8i7UsBBJKcEKvlrfBEDWcIGw8M6oAssoPsCGyXnsP7ZCVBDBgv941mBTJ9Z9vMoKPpr9jZzSVJrU2+DDuxkfSy1KL0vUvZm5PGSiZA72OpRZkNi8ZUbJTRKf71R+hsCtX/ZUQtMlGCX50XUEQl44cvyX32XQb2VlyGvWu0rqgEVS+QZbuWJoZBZAedhzHvnfGiIsnn2PhRyKBvALyGcWAgK0XvC26WF676g2oMk8sjBrp8saPDvMXj06XmD6746i5KC52gLiRAcwlT4zJoA0OB5jYgxXv+/GP9iXNIK578cCGpBes28b7R+hLDBCc/fMv1jMhKWPVXWJZ6VkcpUgH73uxFl43guTZzJfHI1kMF1+PbOviWPdlSj1D44ajloMJP5FXubIfYEIqV19BdU42ZXZ8ISIZYTAj9OhNCUkkTjjGH2VhFz/FjZDxdk9m/Sw+du8dg1v0+6XIMScjuutbLxxol8Dx1yfRSgZZGN+D3vi0hW1OgcpnUhVI/x48LjdWm1IA0XWOzFiJAe98BiL0roTsUk0pgyujzvLcwDFGP9hnQ0YLdCy22UsQ39hRyQzwGAVO8O49bU8sgNy75+4++8Z3pqI91hdoHyzNMSx6fJn/Qd6UcAdTF0divh17q5bZi+x3D7AQEvh5NwePD0HIqBZexT0yNTVTHragJZUetI5FZgE1cZrfchckP/Ub5jdn3e/Cvu8J/yZFAM8glJvO1D+4BZ+/MVAw3AkO7kLhGeXMXr9s9+A/uPlznoC6b9bpjj3X46bFz7dPIYC0aeya87vISA0/5VPkkUZ+U6A9nLkCIcl5XQElMjrzidFJyBmtxHXLrAu5yiWorl3KVOf9QOrKrZt1UrNihIaSIq/46jI5yBQX6LV7fUBrZKe/oMbuf6W0LliNJbKSwZi0RRHo0jBPotUiOsn1qmnh+hZp6rwi1KGOsCAPSMSGnURwoXAdTUmAyPriDjDBKjm2EiDZJ9T3XgNDHVU24SqKjsSoByrD4FcVyqFAl3w0CaSNXloZswE0UqGKoQUy6Up0ceWoeHYfA/FJyaGfkFGRkmYun+wUJZvhpoLv6bn377CziWTSc0o3nl+UZ4pTsRJOlG0FOxzWApjSd8bPIdezPxak2DM0qj6aiUocfEBMLnFn4Sjj1vVFmIGPNXiOPlJF0Ef99I5Gno3YAd4ZHBqpkeUq7+bWur+xhv5zsXs5ARK6TVOVqlMPiKRpDX7lEQoya++U6HIj6zb7arSZivM5YrZeqHFKK4gpORvpg6icApQCBniDgmNxZJFobgzvIwKTABJjoivHs4zIIw6TCjbz38GEFdzbsUuCXQo3tFWaxgiGkxtLnjYr0PTIxFdBfQ5dkRkkxLvUg7uR1uP9IcmO/8QzzyLeSA+I+teZME8QCzui6CY/lhIfjxJimawejCJx33nS9uXNibQ0my41SmXRDGVgiH6el8veIbEHU9RY+elVR6eqlemCuIHfU8QNPNbe7Gzqaaoccd2VUY3PXNHxU87DC7Nttvn99Ow5zxZ8xZUQVfLFntS9d2hgKp8gJ9lgVKzEuYCiL59wuxbNtnAb8mET0Buw24JeQew9e8DdYL2vDLhQz+IqPXKAhlf7BSpPyQTOeaba657CNmkzdiNk3RHGeTRrq4c3/nl1M+ZsPwf8WxoTcmu+W0Y7/j9nps8r+fKlNB23hOEIWZ4KN+Y4qZRKltTARhqmdjLI
hUtWh4D49eTe5sS3MqzsZJJwsEHPPOvZKvOG5UU3jXMg9R4F8CaYgx/M4ClwIIlHvcdW7R7sXke9E/qccIG3jQ5b/mgHCk3pVkAyrRWfBZqXxlfWn+cfzVALtUXWePwhN8+i3CQbjLLOgE6yH3/rBfXQQVYHwrZqoyFchDwlFF5FtF5GThnj04kvhZbq0EcF4lbiULAOiBkJong4Op287QYgq4W8szOn9F2m/4M2XNaI3X7w67GADFHs5TtPXjWx1l6kKIwMM2pcpltXblqgH087payQHx1LnCpztxcxmeoFb3owvwKWmQpV0Gh6CIKfa7hqwCsNggOcKEQWwRJtADEXzPhRYG0mPelWLQMdLLaEzUqh9HElXu3awKazlHa1HkV0nywgldm23DPCKj5Fi6hux7vl7vt8K0Q4KA8Xoys4Pw43eRi9puQM3jOJgxX8Q/MsABHHxPBa94bOsRLFUa/Td70xbHpOrCCp64M7cm6kDKAwPjAhMAkGBSsOAwIaBQAEFEi1rtKgyohIpB9yF4t2L1CpwF+ABBSDiyukmk2pIV5XfqW5AtbEC9LvtQIDAYag +kind: Secret +metadata: + creationTimestamp: null + name: es-cert + namespace: imxc diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml new file mode 100644 index 0000000..d2bff8e --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer + diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/values.yaml new file mode 100644 index 0000000..7b0bd6d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/elasticsearch/values.yaml @@ -0,0 +1,68 @@ +# Default values for sample. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/Chart.yaml new file mode 100644 index 0000000..61a7b7f --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka-manager +version: 0.1.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml new file mode 100644 index 0000000..b20900d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-manager + namespace: imxc +spec: + type: NodePort + ports: + - protocol: TCP + port: 80 + nodePort : 32090 + targetPort: 80 + selector: + app: kafka-manager diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml new file mode 100644 index 0000000..4edcf32 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-manager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-manager + template: + metadata: + labels: + app: kafka-manager + spec: + containers: + - name: kafka-manager + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-manager:{{ .Values.global.KAFKA_MANAGER_VERSION }} + 
resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 200m + memory: 1000Mi + ports: + - containerPort: 80 + env: + - name: ZK_HOSTS + value: zookeeper:2181 + command: + - ./bin/kafka-manager + - -Dhttp.port=80 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/values.yaml new file mode 100644 index 0000000..b5532cd --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka-manager/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka-manager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka/1.broker-config.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/1.broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/1.broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? 
-ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? 
-eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + 
listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + 
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + 
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/Chart.yaml new file mode 100644 index 0000000..9565567 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka +version: 0.1.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/2.dns.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/2.dns.yaml new file mode 100644 index 0000000..8ffb3f8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/2.dns.yaml @@ -0,0 +1,14 @@ +# A headless service to create DNS records +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + namespace: imxc +spec: + ports: + - port: 9092 + clusterIP: None + selector: + app: kafka +--- diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml new file mode 100644 index 0000000..1cd7406 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: +# name: bootstrap + name: kafka + namespace: imxc +spec: + ports: + - port: 9092 + selector: + app: kafka diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml 
b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml new file mode 100644 index 0000000..6f67ab4 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-1 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-2 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +# On-prem/워커노드 두개/브로커 두개 환경에서 발생할 수 있는 affinity 충돌때문에 주석처리 +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-kafka-cluster-3 +# labels: +# type: local +# app: kafka +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.IMXC_KAFKA_PV_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: kafka-broker +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: + # - {{ .Values.global.IMXC_KAFKA_HOST3 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml 
b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml new file mode 100644 index 0000000..1982584 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml @@ -0,0 +1,132 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + namespace: imxc +spec: + selector: + matchLabels: + app: kafka + serviceName: "kafka-headless" + replicas: 2 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: kafka + annotations: + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kafka + topologyKey: "kubernetes.io/hostname" + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + terminationGracePeriodSeconds: 30 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: extensions + mountPath: /opt/kafka/libs/extensions + containers: + - name: broker + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 6000Mi + limits: + # This limit was intentionally set low as a reminder that + # the entire Yolean/kubernetes-kafka is meant to be tweaked + # before you run production workloads + cpu: 
500m + memory: 10000Mi + env: + - name: CLASSPATH + value: /opt/kafka/libs/extensions/* + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + - name: JMX_PORT + value: "5555" + - name: KAFKA_OPTS + value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml + ports: + - name: inside + containerPort: 9092 + - name: outside + containerPort: 9094 + - name: global + containerPort: 9095 + - name: jmx + containerPort: 9010 + command: + - ./bin/kafka-server-start.sh + - /etc/kafka/server.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "rm -rf /var/lib/kafka/data/*;kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] +# readinessProbe: +# tcpSocket: +# port: 9092 +# timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/kafka/data + - name: extensions + mountPath: /opt/kafka/libs/extensions + volumes: + - name: configmap + configMap: + name: broker-config + - name: config + emptyDir: {} + - name: extensions + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: kafka-broker + resources: + requests: + storage: 30Gi diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/6.outside.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/6.outside.yaml new file mode 100644 index 0000000..c2d8170 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/templates/6.outside.yaml @@ -0,0 +1,89 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9094 + port: 32400 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 
9094 + port: 32401 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9095 + port: 32500 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9095 + port: 32501 + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9094 + name: kafka + protocol: TCP + targetPort: 9094 + selector: + app: kafka +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker-global + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9095 + name: kafka + protocol: TCP + targetPort: 9095 + selector: + app: kafka diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/kafka/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/values.yaml new file mode 100644 index 0000000..cb0e677 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/kafka/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/postgres/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/postgres/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/Chart.yaml new file mode 100644 index 0000000..d602e29 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: postgres +version: 0.1.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml new file mode 100644 index 0000000..95c8bda --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-config + namespace: imxc + labels: + app: postgres +data: + POSTGRES_DB: postgresdb + POSTGRES_USER: admin + POSTGRES_PASSWORD: eorbahrhkswp diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml new file mode 100644 index 0000000..dfbd714 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml @@ -0,0 +1,38 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: postgres-pv-volume + labels: + type: local + app: postgres +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: "{{ .Values.global.IMXC_POSTGRES_PV_PATH }}" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ 
.Values.global.affinity_value1 }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postgres-pv-claim + namespace: imxc + labels: + app: postgres +spec: + storageClassName: manual + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml new file mode 100644 index 0000000..31e90a2 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: imxc + labels: + app: postgres +spec: + type: ClusterIP + ports: + - port: 5432 + # nodePort: 5432 + selector: + app: postgres diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml new file mode 100644 index 0000000..14993e8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml @@ -0,0 +1,45 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Deployment +metadata: + name: postgres + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: postgres +{{- end }} + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: {{ .Values.global.IMXC_IN_REGISTRY }}/postgres:{{ .Values.global.POSTGRES_VERSION }} + resources: + requests: + cpu: 100m + memory: 2000Mi + limits: + cpu: 300m + memory: 2000Mi + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + args: 
["-c","max_connections=1000","-c","shared_buffers=512MB","-c","deadlock_timeout=5s","-c","statement_timeout=15s","-c","idle_in_transaction_session_timeout=60s"] + envFrom: + - configMapRef: + name: postgres-config + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: postgredb + volumes: + - name: postgredb + persistentVolumeClaim: + claimName: postgres-pv-claim diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/postgres/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/values.yaml new file mode 100644 index 0000000..9972ab8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/postgres/values.yaml @@ -0,0 +1,68 @@ +# Default values for postgres. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.lock b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.lock new file mode 100644 index 0000000..21ff14f --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.8.0 +digest: sha256:3e342a25057f87853e52d83e1d14e6d8727c15fd85aaae22e7594489cc129f15 +generated: "2021-08-09T15:49:41.56962208Z" diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.yaml new file mode 100644 index 0000000..3b08f9c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.8.22 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x 
+description: Open source message broker software that implements the Advanced Message + Queuing Protocol (AMQP) +home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: rabbitmq +sources: +- https://github.com/bitnami/bitnami-docker-rabbitmq +- https://www.rabbitmq.com +version: 8.20.5 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/README.md b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/README.md new file mode 100644 index 0000000..9b26b09 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/README.md @@ -0,0 +1,566 @@ +# RabbitMQ + +[RabbitMQ](https://www.rabbitmq.com/) is an open source message broker software that implements the Advanced Message Queuing Protocol (AMQP). + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. 
The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### RabbitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------- | ---------------------- | +| `image.registry` | RabbitMQ image registry | `docker.io` | +| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` | +| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.8.21-debian-10-r13` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + + +### Common parameters + +| Name | Description | Value | +| ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm 
capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `[]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `[]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
| `verify_peer` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). 
| `false` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. | `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `advancedConfiguration` | Configuration file content: advanced configuration | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.servers` | List of LDAP servers hostnames | `[]` | +| `ldap.port` | LDAP servers port | `389` | +| `ldap.user_dn_pattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind | `cn=${username},dc=example,dc=org` | +| `ldap.tls.enabled` | If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter | `false` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. 
| `{}` | +| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with the release name prepended. | `false` | + + +### Statefulset parameters + +| Name | Description | Value | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` | +| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` | +| `updateStrategyType` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` | +| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `{}` | +| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` | +| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` | +| `containerSecurityContext` | RabbitMQ containers' Security Context | `{}` | +| `resources.limits` | The resources limits for RabbitMQ containers | `{}` | +| `resources.requests` | The requested resources for RabbitMQ containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain 
scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + + +### RBAC parameters + +| Name | Description | Value | +| ----------------------- | --------------------------------------------------- | ------ | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `rbac.create` | Whether RBAC rules should be created | `true` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | ----------------------------------------------- | --------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessMode` | PVC Access Mode for RabbitMQ data volume | `ReadWriteOnce` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.volumes` | Additional volumes without creating PVC | `[]` | + + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
| `true` | +| `service.port` | Amqp port | `5672` | +| `service.portName` | Amqp service port name | `amqp` | +| `service.tlsPort` | Amqp TLS port | `5671` | +| `service.tlsPortName` | Amqp TLS service port name | `amqp-ssl` | +| `service.nodePort` | Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.tlsNodePort` | Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.distPort` | Erlang distribution server port | `25672` | +| `service.distPortName` | Erlang distribution service port name | `dist` | +| `service.distNodePort` | Node port override for `dist` port, if serviceType is `NodePort` | `""` | +| `service.managerPortEnabled` | RabbitMQ Manager port | `true` | +| `service.managerPort` | RabbitMQ Manager port | `15672` | +| `service.managerPortName` | RabbitMQ Manager service port name | `http-stats` | +| `service.managerNodePort` | Node port override for `http-stats` port, if serviceType `NodePort` | `""` | +| `service.metricsPort` | RabbitMQ Prometheus metrics port | `9419` | +| `service.metricsPortName` | RabbitMQ Prometheus metrics service port name | `metrics` | +| `service.metricsNodePort` | Node port override for `metrics` port, if serviceType is `NodePort` | `""` | +| `service.epmdNodePort` | Node port override for `epmd` port, if serviceType is `NodePort` | `""` | +| `service.epmdPortName` | EPMD Discovery service port name | `epmd` | +| `service.extraPorts` | Extra ports to expose in the service | `[]` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` | +| `service.externalIPs` | Set the ExternalIPs | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` | +| `service.labels` | Service labels. Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. 
Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.certManager` | Set this to true in order to add the corresponding annotations for cert-manager | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
| `[]` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------- | +| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` | +| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` | +| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` | +| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource 
should be created | `""` | +| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r172` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + + +The above parameters map to the env variables defined in [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \ + bitnami/rabbitmq +``` + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. 
+ +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/rabbitmq +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Set pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. 
+ +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command. + +Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/). + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +* Create a secret with the certificates and associate the secret when deploying the chart +* Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls/). + +### Load custom definitions + +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](http://www.rabbitmq.com/management.html#load-definitions). + +Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). 
Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. + +Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. Any load definitions specified will be available within in the container at `/app`. + +> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values. + +Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/). + +### Configure LDAP support + +LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/). + +### Configure memory high watermark + +It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives: + +* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="absolute" +memoryHighWatermark.value="512MB" +``` + +* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. 
An example configuration is shown below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="relative" +memoryHighWatermark.value="0.4" +resources.limits.memory="2Gi" +``` + +### Add extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use plugins + +The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s. + +To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ. + +Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/). + +### Recover the cluster from complete shutdown + +> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand. + +The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover. + +This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. 
If that happens, update the pod management policy to recover a healthy state: + +```console +$ kubectl delete statefulset STATEFULSET_NAME --cascade=false +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +For a faster resynchronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests. + +If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod): + +```console +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set clustering.forceBoot=true \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting). + +### Known issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. 
+ +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Use existing PersistentVolumeClaims + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq +``` + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. + +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +### Configure the default user/vhost + +If you want to create default user/vhost and set the default permission. you can use `extraConfiguration`: + +```yaml +auth: + username: default-user +extraConfiguration: |- + default_vhost = default-vhost + default_permissions.configure = .* + default_permissions.read = .* + default_permissions.write = .* +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. 
When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart: + +```bash +$ helm upgrade my-release bitnami/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE] +``` + +| Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes. + +### To 8.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/). + +### To 7.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - `replicas` is renamed to `replicaCount`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Authentication parameters were reorganized under the `auth.*` parameter: + - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively. + - `rabbitmq.tls.*` parameters are now under `auth.tls.*`. + - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`. + - `rabbitmq.rabbitmqClusterNodeName` is deprecated. + - `rabbitmq.setUlimitNofiles` is deprecated. + - `forceBoot.enabled` is renamed to `clustering.forceBoot`. + - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`. 
+ - `metrics.port` is renamed to `service.metricsPort`. + - `service.extraContainerPorts` is renamed to `extraContainerPorts`. + - `service.nodeTlsPort` is renamed to `service.tlsNodePort`. + - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`. + - `rbacEnabled` -> deprecated in favor of `rbac.create`. + - New parameters: `serviceAccount.create`, and `serviceAccount.name`. + - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`. +- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices. +- Initialization logic now relies on the container. +- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +Consequences: + +- Backwards compatibility is not guaranteed. +- Compatibility with non Bitnami images is not guaranteed anymore. + +### To 6.0.0 + +This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y. + +### To 5.0.0 + +This major release changes the clustering method from `ip` to `hostname`. +This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change. + +> IMPORTANT: Note that if you upgrade from a previous version you will lose your data. 
+ +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq: + +```console +$ kubectl delete statefulset rabbitmq --cascade=false +``` + +## Bitnami Kubernetes Documentation + +Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). You can find there the following resources: + +- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/) +- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/) +- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/) +- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/) +- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/) diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml new file mode 100644 index 0000000..344c403 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.8.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.8.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/README.md b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/README.md new file mode 100644 index 0000000..054e51f --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/README.md @@ -0,0 +1,327 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . 
}} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. 
| `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for policy | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` 
Chart context |
+| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartName fields are optional. |
+| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. 
| `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. 
| +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. 
+ example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. 
[Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
| quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default 
(dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..ae45d5e --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl @@ -0,0 +1,117 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for policy. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..f905f20 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl @@ -0,0 +1,55 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. 
The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..60b84a7 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . 
-}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..1e5bba9 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..18d9813 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (not $existingSecretValue) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml new file mode 100644 index 0000000..de92d88 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml @@ -0,0 +1,4 @@ +tolerations: + - key: foo + operator: "Equal" + value: bar diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt new file mode 100644 index 0000000..24ffa89 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt @@ -0,0 +1,167 @@ +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.port .Values.service.tlsPort -}} +{{- $serviceNodePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.nodePort .Values.service.tlsNodePort -}} +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh + +{{- else }} + +Credentials: + +{{- if not .Values.loadDefinition.enabled }} + echo "Username : {{ .Values.auth.username }}" + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)" +{{- end }} + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)" + +Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid. +This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading. +More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases. + +RabbitMQ can be accessed within the cluster on port {{ $serviceNodePort }} at {{ include "rabbitmq.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clustering.k8s_domain }} + +To access from outside the cluster, perform the following steps: + +{{- if .Values.ingress.enabled }} +{{- if contains "NodePort" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Create a port-forward to the AMQP port: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} & + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + +{{- end }} + +2. Access RabbitMQ using the obtained URL. + +To Access the RabbitMQ Management interface: + +1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. 
Use: `kubectl cluster-info` on others K8s clusters + echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/" + echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts + +2. Open a browser and access RabbitMQ Management using the obtained URL. + +{{- else }} +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "rabbitmq.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} + +To Access the RabbitMQ Management interface: + + echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }} + +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.metricsPort }}:{{ .Values.service.metricsPort }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.metricsPort }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "rabbitmq.validateValues" . -}} + +{{- $requiredPassword := list -}} +{{- $secretNameRabbitmq := include "rabbitmq.secretPasswordName" . 
-}} + +{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) -}} + {{- $requiredRabbitmqPassword := dict "valueKey" "auth.password" "secret" $secretNameRabbitmq "field" "rabbitmq-password" -}} + {{- $requiredPassword = append $requiredPassword $requiredRabbitmqPassword -}} +{{- end -}} + +{{- if not .Values.auth.existingErlangSecret -}} + {{- $requiredErlangPassword := dict "valueKey" "auth.erlangCookie" "secret" $secretNameRabbitmq "field" "rabbitmq-erlang-cookie" -}} + {{- $requiredPassword = append $requiredPassword $requiredErlangPassword -}} +{{- end -}} + +{{- $requiredRabbitmqPasswordErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPassword "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredRabbitmqPasswordErrors) "context" $) -}} + +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl new file mode 100644 index 0000000..6b46b23 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,247 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "rabbitmq.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rabbitmq.fullname" -}} +{{- include "common.names.fullname" . 
-}} +{{- end -}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "rabbitmq.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "rabbitmq.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. 
+*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "rabbitmq.createTlsSecret" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.plugins -}} +{{- if .Values.extraPlugins -}} +{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}} +{{- end -}} +{{- if .Values.metrics.enabled -}} +{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}} +{{- end -}} +{{- printf "%s" $plugins | replace " " ", " -}} +{{- end -}} + +{{/* +Return the number of bytes given a value +following a base 2 o base 10 number system. +Usage: +{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }} +*/}} +{{- define "rabbitmq.toBytes" -}} +{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }} +{{- $unit := regexReplaceAll "[0-9]+(.*)" . 
"${1}" }} +{{- if eq $unit "Ki" }} + {{- mul $value 1024 }} +{{- else if eq $unit "Mi" }} + {{- mul $value 1024 1024 }} +{{- else if eq $unit "Gi" }} + {{- mul $value 1024 1024 1024 }} +{{- else if eq $unit "Ti" }} + {{- mul $value 1024 1024 1024 1024 }} +{{- else if eq $unit "Pi" }} + {{- mul $value 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "Ei" }} + {{- mul $value 1024 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "K" }} + {{- mul $value 1000 }} +{{- else if eq $unit "M" }} + {{- mul $value 1000 1000 }} +{{- else if eq $unit "G" }} + {{- mul $value 1000 1000 1000 }} +{{- else if eq $unit "T" }} + {{- mul $value 1000 1000 1000 1000 }} +{{- else if eq $unit "P" }} + {{- mul $value 1000 1000 1000 1000 1000 }} +{{- else if eq $unit "E" }} + {{- mul $value 1000 1000 1000 1000 1000 1000 }} +{{- end }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- $serversListLength := len .Values.ldap.servers }} +{{- if or (not (gt $serversListLength 0)) (not (and .Values.ldap.port .Values.ldap.user_dn_pattern)) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers", + "ldap.port", and "ldap. user_dn_pattern" are mandatory. 
Please provide them: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.servers[0]="my-ldap-server" \ + --set ldap.port="389" \ + --set ldap.user_dn_pattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - Memory high watermark +*/}} +{{- define "rabbitmq.validateValues.memoryHighWatermark" -}} +{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }} +rabbitmq: memoryHighWatermark.type + Invalid Memory high watermark type. Valid values are "absolute" and + "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx") +{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }} +rabbitmq: memoryHighWatermark + You enabled configuring memory high watermark using a relative limit. However, + no memory limits were defined at POD level. 
Define your POD limits as shown below: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="relative" \ + --set memoryHighWatermark.value="0.4" \ + --set resources.limits.memory="2Gi" + + Alternatively, use an absolute value for the memory high watermark: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="absolute" \ + --set memoryHighWatermark.value="512MB" +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - TLS configuration for Ingress +*/}} +{{- define "rabbitmq.validateValues.ingress.tls" -}} +{{- if and .Values.ingress.enabled .Values.ingress.tls (not .Values.ingress.certManager) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }} +rabbitmq: ingress.tls + You enabled the TLS configuration for the default ingress hostname but + you did not enable any of the available mechanisms to create the TLS secret + to be used by the Ingress Controller. + Please use any of these alternatives: + - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates. + - Rely on cert-manager to create it by setting `ingress.certManager=true` + - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` +{{- end -}} +{{- end -}} + +{{/* +Validate values of RabbitMQ - Auth TLS enabled +*/}} +{{- define "rabbitmq.validateValues.auth.tls" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }} +rabbitmq: auth.tls + You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret. 
+ Please use any of these alternatives: + - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret` + - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`. + - Enable auto-generated certificates using `auth.tls.autoGenerated`. +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml new file mode 100644 index 0000000..5ba6b72 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-config + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} + {{- if .Values.advancedConfiguration}} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | nindent 4 }} + {{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml new file mode 100644 index 0000000..db74e50 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml @@ -0,0 +1,57 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" .Values.service.managerPortName "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http-stats" "context" $) | nindent 14 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 0000000..158aeaa --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: 4369 # EPMD + - port: {{ .Values.service.port }} + - port: {{ .Values.service.tlsPort }} + - port: {{ .Values.service.distPort }} + - port: {{ .Values.service.managerPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "rabbitmq.fullname" . }}-client: "true" + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.service.metricsPort }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml new file mode 100644 index 0000000..bf06b66 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.pdb.create }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 0000000..a1ba629 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "rabbitmq.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . 
"context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml new file mode 100644 index 0000000..d0f8bdd --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml @@ -0,0 +1,22 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: rabbitmq-pv + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: {{ .Values.global.RABBITMQ_PATH }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml new file mode 100644 index 0000000..c677752 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: rabbitmq-pvc + namespace: imxc + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi + diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/role.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/role.yaml new file mode 100644 index 0000000..9bd029e --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 0000000..74f82f0 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml new file mode 100644 index 0000000..4d14e4e --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml @@ -0,0 +1,43 @@ +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) }} + {{- if .Values.auth.password }} + rabbitmq-password: {{ .Values.auth.password | b64enc | quote }} + {{- else }} + rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not .Values.auth.existingErlangSecret }} + {{- if .Values.auth.erlangCookie }} + rabbitmq-erlang-cookie: {{ .Values.auth.erlangCookie | b64enc | quote }} + {{- else }} + rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- $extraSecretsPrependReleaseName := .Values.extraSecretsPrependReleaseName }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + {{- if $extraSecretsPrependReleaseName }} + name: {{ $.Release.Name }}-{{ $key }} + {{- else }} + name: {{ $key }} + {{- end }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 0000000..562fde9 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +secrets: + - name: {{ include "rabbitmq.fullname" . }} +{{- end }} + diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 0000000..46b9040 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + {{- with .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml new file mode 100644 index 0000000..45abd14 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,382 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.statefulsetLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.statefulsetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "rabbitmq.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.updateStrategyType }} + {{- if (eq "OnDelete" .Values.updateStrategyType) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configuration.yaml") . | sha256sum }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + {{- include "rabbitmq.podAnnotations" . | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . 
}} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- if or (.Values.initContainers) (and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext }} + securityContext: {{- toYaml .Values.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + 
value: "{{ template "rabbitmq.fullname" . }}-headless" + - name: K8S_ADDRESS_TYPE + value: {{ .Values.clustering.addressType }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . 
}} + key: rabbitmq-erlang-cookie + {{- if .Values.loadDefinition.enabled }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "yes" + - name: RABBITMQ_SECURE_PASSWORD + value: "no" + {{- else }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + {{- end }} + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . | quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + containerPort: 5672 + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.service.tlsPort }} + {{- end }} + - name: dist + containerPort: 25672 + - name: stats + containerPort: 15672 + - name: epmd + containerPort: 4369 + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: 9419 + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- toYaml .Values.extraContainerPorts | nindent 12 }} + {{- end }} + - name: stomp + containerPort: 61613 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) 
| nindent 12 }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + lifecycle: + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + postStart: + exec: + command: + - /bin/bash + - -ec + - | + until rabbitmqctl cluster_status >/dev/null; do + echo "Waiting for cluster readiness..." + sleep 5 + done + rabbitmq-queues rebalance "all" + {{- end }} + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + resources: + requests: + memory: "500Mi" + cpu: "150m" + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.persistence.volumes }} + {{- toYaml .Values.persistence.volumes | nindent 8 }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + secret: + secretName: {{ template "rabbitmq.tlsSecretName" . 
}} + items: + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- end }} + - name: configuration + configMap: + name: {{ template "rabbitmq.fullname" . }}-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + {{- if .Values.advancedConfiguration}} + - key: advanced.config + path: advanced.config + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if not (contains "data" (quote .Values.persistence.volumes)) }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" . 
| nindent 10 }} + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 0000000..4ed26cc --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }}-headless + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if or (.Values.service.annotationsHeadless) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotationsHeadless}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotationsHeadless "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: amqp + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml new file mode 100644 index 0000000..2b4c224 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.service.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or (.Values.service.annotations) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq 
.Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.tlsNodePort)) }} + nodePort: {{ .Values.service.tlsNodePort }} + {{- end }} + {{- end }} + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.epmdNodePort))) }} + nodePort: {{ .Values.service.epmdNodePort }} + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.distNodePort))) }} + nodePort: {{ .Values.service.distNodePort }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.managerNodePort))) }} + nodePort: {{ .Values.service.managerNodePort }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.metricsPortName }} + port: {{ .Values.service.metricsPort }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.metricsNodePort))) }} + nodePort: {{ .Values.service.metricsNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml new file mode 100644 index 0000000..b6a6078 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml @@ -0,0 +1,74 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls" .Values.ingress.hostname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +--- +{{- end }} +{{- end }} +{{- if (include "rabbitmq.createTlsSecret" . )}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }}-certs + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if or (not .Values.auth.tls.autoGenerated ) (and .Values.auth.tls.caCertificate .Values.auth.tls.serverCertificate .Values.auth.tls.serverKey) }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate| b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 365 }} + {{- $fullname := include "rabbitmq.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "rabbitmq.fullname" . 
}} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.schema.json b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.schema.json new file mode 100644 index 0000000..8ef33ef --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + 
} + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.yaml new file mode 100644 index 0000000..5b74e6c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/rabbitmq/values.yaml @@ -0,0 +1,1151 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +## @section RabitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## @param image.registry RabbitMQ image registry +## @param image.repository RabbitMQ 
image repository +## @param image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: rabbitmq + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} + + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + +## @section Common parameters + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" + +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" + +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep 
+ ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + password: "eorbahrhkswp" + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + erlangCookie: "pf6t82zTrqY9iaupUmkPOJxPXjmjiNEd" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
+ ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + existingSecret: "" + existingSecretFullChain: false + +## @param logs Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" + +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65536" + +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. 
+## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" + +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. + ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256MB + ## + value: 0.4 + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s rabbitmq_stomp" + +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. 
+## +communityPlugins: "" + +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap rabbitmq_stomp" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: false + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). + ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +args: [] + +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data. 
+## +terminationGracePeriodSeconds: 120 + +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] + +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" + +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] + +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. +## To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead +## +configuration: |- + {{- if not .Values.loadDefinition.enabled -}} + ## Username and password + ## + default_user = {{ .Values.auth.username }} + default_pass = eorbahrhkswp + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + {{ tpl .Values.extraConfiguration . 
}} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + {{- range $index, $server := .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = 9419 + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json + +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. 
+## advancedConfiguration: |-
+##   [{
+##     rabbitmq_auth_backend_ldap,
+##     [{
+##         ssl_options,
+##         [{
+##             verify, verify_none
+##         }, {
+##             fail_if_no_peer_cert,
+##             false
+##         }]
+##     ]}
+##   }].
+##
+advancedConfiguration: |-
+
+## LDAP configuration
+##
+ldap:
+  ## @param ldap.enabled Enable LDAP support
+  ##
+  enabled: false
+  ## @param ldap.servers List of LDAP servers hostnames
+  ##
+  servers: []
+  ## @param ldap.port LDAP servers port
+  ##
+  port: "389"
+  ## Pattern used to translate the provided username into a value to be used for the LDAP bind
+  ## @param ldap.user_dn_pattern Pattern used to translate the provided username into a value to be used for the LDAP bind
+  ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns
+  ##
+  user_dn_pattern: cn=${username},dc=example,dc=org
+  tls:
+    ## @param ldap.tls.enabled If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter
+    ##
+    enabled: false
+
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts
+## Examples:
+## extraVolumeMounts:
+##   - name: extras
+##     mountPath: /usr/share/extras
+##     readOnly: true
+##
+extraVolumeMounts: []
+## @param extraVolumes Optionally specify extra list of additional volumes.
+## Example:
+## extraVolumes:
+##   - name: extras
+##     emptyDir: {}
+##
+extraVolumes: []
+
+## @param extraSecrets Optionally specify extra secrets to be created by the chart.
+## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded.
+## Example:
+## extraSecrets:
+##   load-definition:
+##     load_definition.json: |
+##       {
+##         ...
+##       }
+##
+extraSecrets: {}
+## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with the release name prepended.
+## +extraSecretsPrependReleaseName: false + +## @section Statefulset parameters + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 + +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady + +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## @param updateStrategyType Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategyType: RollingUpdate + +## @param statefulsetLabels RabbitMQ statefulset labels. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} + +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" + +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} + +## @param nodeSelector Node labels for pod assignment. 
Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: {} + +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroup Group ID for the filesystem used by the containers +## @param podSecurityContext.runAsUser User ID for the service user running the pod +## +podSecurityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## @param containerSecurityContext RabbitMQ containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## RabbitMQ containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + ## Example: + ## limits: + ## cpu: 1000m + ## memory: 2Gi + limits: {} + ## Examples: + ## requests: + ## cpu: 1000m + ## memory: 2Gi + requests: {} + +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} + +## @param 
customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} + +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} + +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] + +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" + +## @section RBAC parameters + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + 
create: true + +## @section Persistence parameters + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "rabbitmq" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessMode PVC Access Mode for RabbitMQ data volume + ## + accessMode: ReadWriteOnce + + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "rabbitmq-pvc" + + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 5Gi + + ## @param persistence.volumes Additional volumes without creating PVC + ## - name: volume_name + ## emptyDir: {} + ## + volumes: [] + +## @section Exposure parameters + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + # type: NodePort + type: ClusterIP + + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
+ portEnabled: true + + ## @param service.port Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## @param service.portName Amqp service port name + ## + portName: amqp + + ## @param service.tlsPort Amqp TLS port + ## + tlsPort: 5671 + + ## @param service.tlsPortName Amqp TLS service port name + ## + tlsPortName: amqp-ssl + + ## @param service.nodePort Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## e.g: + ## nodePort: 30672 + ## + nodePort: "" + + ## @param service.tlsNodePort Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` + ## e.g: + ## tlsNodePort: 30671 + ## + tlsNodePort: "" + + ## @param service.distPort Erlang distribution server port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## @param service.distPortName Erlang distribution service port name + ## + distPortName: dist + + ## @param service.distNodePort Node port override for `dist` port, if serviceType is `NodePort` + ## e.g: + ## distNodePort: 30676 + ## + distNodePort: "" + + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPortEnabled: true + + ## @param service.managerPort RabbitMQ Manager port + ## + managerPort: 15672 + + ## @param service.managerPortName RabbitMQ Manager service port name + ## + managerPortName: http-stats + + ## @param service.managerNodePort Node port override for `http-stats` port, if serviceType `NodePort` + ## e.g: + ## managerNodePort: 30673 + ## + managerNodePort: "" + + ## @param service.metricsPort RabbitMQ Prometheues metrics port + ## + metricsPort: 9419 + + ## @param service.metricsPortName RabbitMQ Prometheues metrics service port name + ## + metricsPortName: metrics + + ## 
@param service.metricsNodePort Node port override for `metrics` port, if serviceType is `NodePort` + ## e.g: + ## metricsNodePort: 30674 + ## + metricsNodePort: "" + + ## @param service.epmdNodePort Node port override for `epmd` port, if serviceType is `NodePort` + ## e.g: + ## epmdNodePort: 30675 + ## + epmdNodePort: "" + + ## @param service.epmdPortName EPMD Discovery service port name + ## + epmdPortName: epmd + + ## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: + - name: stomp + port: 61613 + targetPort: 61613 + #nodePort: 31613 + + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + + ## @param service.annotations Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## @param service.annotationsHeadless Headless Service annotations. 
Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + + ## @param ingress.annotations Ingress annotations + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + ## + annotations: {} + + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Relay on cert-manager to create it by setting `ingress.certManager=true` + ## - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + + ## @param ingress.certManager Set this to true in order to add the corresponding annotations for cert-manager + ## to generate a TLS secret for the ingress record + ## + certManager: false + + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by 
Helm + ## + selfSigned: false + + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.additionalRules Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## e.g: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: [] + +## @section Metrics Parameters + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.metricsPort }}" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param 
metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Specify Metric Relabellings to add to the scrape endpoint + ## + relabellings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Used to pass Labels that are required by the installed Prometheus Operator + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrap metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + path: "" + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + 
additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . 
}}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] + +## @section Init Container Parameters + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: 10.10.31.243:5000/cmoa3 # docker.io + repository: bitnami-shell # bitnami/bitnami-shell + tag: 10-debian-10-r175 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## 
Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + requests: {} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/redis/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.lock b/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.lock new file mode 100644 index 0000000..ee0ecb7 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.3.3 +digest: sha256:264db18c8d0962b5c4340840f62306f45fe8d2c1c8999dd41c0f2d62fc93a220 +generated: "2021-01-15T00:05:10.125742807Z" diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.yaml new file mode 100644 index 0000000..6924d59 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 6.0.10 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +- http://redis.io/ +version: 12.7.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/README.md b/roles/cmoa_demo_install/files/02-base/base/charts/redis/README.md new file mode 100644 index 0000000..3befa8c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/README.md @@ -0,0 +1,707 @@ +# RedisTM Chart packaged by Bitnami + +[RedisTM](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +Disclaimer: REDIS® is a registered trademark of Redis Labs Ltd.Any rights therein are reserved to Redis Labs Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Labs Ltd. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis +``` + +## Introduction + +This chart bootstraps a [RedisTM](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +### Choose between RedisTM Helm Chart and RedisTM Cluster Helm Chart + +You can choose any of the two RedisTM Helm charts for deploying a RedisTM cluster. 
+While [RedisTM Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster using RedisTM Sentinel, the [RedisTM Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a RedisTM Cluster topology with sharding. +The main features of each chart are the following: + +| RedisTM | RedisTM Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![RedisTM Topology](img/redis-topology.png) | ![RedisTM Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys RedisTM on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RedisTM chart and their default values. 
+ +| Parameter | Description | Default | +|:------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | RedisTM password (overrides `password`) | `nil` | +| `image.registry` | RedisTM Image registry | `docker.io` | +| `image.repository` | RedisTM Image name | `bitnami/redis` | +| `image.tag` | RedisTM Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `2` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | RedisTM password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common RedisTM node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable 
NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable security context (both redis master and slave containers) | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container (both redis master and slave containers) | `1001` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Specifies annotations to add to ServiceAccount. 
| `nil` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | RedisTM exporter image registry | `docker.io` | +| `metrics.image.repository` | RedisTM exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | RedisTM exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.serviceMonitor.relabelings` | ServiceMonitor relabelings. Value is evaluated as a template | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | ServiceMonitor metricRelabelings. 
Value is evaluated as a template | `[]` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.hostAliases` | Add deployment host aliases | `[]` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.persistence.volumes` | Additional volumes without creating PVC | `{}` | +| `master.statefulset.labels` | Additional labels for redis master StatefulSet | `{}` | +| `master.statefulset.annotations` | Additional annotations for redis master StatefulSet | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.statefulset.volumeClaimTemplates.labels` | Additional labels for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.podLabels` | Additional labels for RedisTM master pod | {} | +| `master.podAnnotations` | Additional annotations for RedisTM master pod | {} | +| `master.extraEnvVars` | Additional Environment Variables passed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarsSecret` | Additional 
Environment Variables Secret passed to the master's stateful set | `[]` | +| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` | +| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` | +| `podDisruptionBudget.maxUnavailable` | Maximum unavailable | `nil` | +| `redisPort` | RedisTM port (in both master and slaves) | `6379` | +| `tls.enabled` | Enable TLS support for replication traffic | `false` | +| `tls.authClients` | Require clients to authenticate or not | `true` | +| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `nil` | +| `tls.certKeyFilename` | Certificate key filename | `nil` | +| `tls.certCAFilename` | CA Certificate filename | `nil` | +| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) | `nil` | +| `master.command` | RedisTM master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | +| `master.preExecCmds` | Text to inset into the startup script immediately prior to `master.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `master.configmap` | Additional RedisTM configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of RedisTM commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | RedisTM master additional command line flags | [] | +| `master.nodeSelector` | RedisTM master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for RedisTM master pod assignment | [] | +| `master.affinity` | Affinity settings for RedisTM master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | RedisTM master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master 
pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.shareProcessNamespace` | RedisTM Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. 
| `false` | +| `master.priorityClassName` | RedisTM Master pod priorityClassName | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `volumePermissions.securityContext.*` | Security context of the init container | `{}` | +| `volumePermissions.securityContext.runAsUser` | UserID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | 0 | +| `slave.hostAliases` | Add deployment host aliases | `[]` | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.command` | RedisTM slave entrypoint string. The command `redis-server` is executed if this is not provided. 
Note this is prepended with `exec` | `/run.sh` | +| `slave.preExecCmds` | Text to inset into the startup script immediately prior to `slave.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `slave.configmap` | Additional RedisTM configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of RedisTM commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | RedisTM slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.shareProcessNamespace` | RedisTM slave pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.labels` | Additional labels for redis slave StatefulSet | `{}` | +| `slave.statefulset.annotations` | Additional annotations for redis slave StatefulSet | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.statefulset.volumeClaimTemplates.labels` | Additional labels for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.extraEnvVars` | Additional Environment Variables passed to the pod of the slave's stateful set set | `[]` | +| `slave.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the slave's stateful set set | `[]` | +| `masslaveter.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the slave's stateful set | `[]` | +| `slave.podLabels` | Additional labels for RedisTM slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for RedisTM slave pod | `master.podAnnotations` | 
+| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | RedisTM slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.tolerations` | Toleration labels for RedisTM slave pod assignment | [] | +| `slave.spreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for RedisTM slave pod | {} | +| `slave.priorityClassName` | RedisTM Slave pod priorityClassName | `nil` | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a RedisTM node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | RedisTM Sentinel port | `26379` | +| `sentinel.configmap` | Additional RedisTM configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| 
`sentinel.service.redisPort` | Kubernetes Service port for RedisTM read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for RedisTM sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for RedisTM read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for RedisTM sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if RedisTM sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | RedisTM sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | RedisTM Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | RedisTM Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | RedisTM Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sentinel.extraEnvVars` | Additional Environment Variables passed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the sentinel node statefulset | `[]` | +| `sentinel.preExecCmds` | Text to inset into the startup script immediately prior to `sentinel.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + bitnami/redis +``` + +The above command sets the RedisTM server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the RedisTM pod as it attempts to write to the `/bitnami` directory. Consider installing RedisTM with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. 
+ +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Change RedisTM version + +To modify the RedisTM version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/redis/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master node allowed) and a RedisTM slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - RedisTM Master service: Points to the master, where read-write operations can be performed + - RedisTM Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master allowed) and a RedisTM slave StatefulSet. In this case, the pods will contain an extra container with RedisTM Sentinel. This container will form a cluster of RedisTM Sentinel nodes, which will promote a new master in case the actual one fails. 
In addition to this, only one service is exposed: + + - RedisTM service: Exposes port 6379 for RedisTM read-only operations and port 26379 for accessing RedisTM Sentinel. + +For read-only operations, access the service using port 6379. For write operations, it's necessary to access the RedisTM Sentinel cluster and query the current master using the command below (using redis-cli or similar: + +``` +SENTINEL get-master-addr-by-name +``` +This command will return the address of the current master, which can be accessed from inside the cluster. + +In case the current master crashes, the Sentinel containers will elect a new master node. + +### Using password file +To use a password file for RedisTM you need to create a secret containing the password. + +> *NOTE*: It is important that the file with the password must be called `redis-password` + +And then deploy the Helm Chart using the secret name as parameter: + +```console +usePassword=true +usePasswordFile=true +existingSecret=redis-password-file +sentinels.enabled=true +metrics.enabled=true +``` + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of the secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. +- `tls.certCAFilename`: CA Certificate filename. No defaults. 
+ +For example: + +First, create the secret with the cetificates files: + +```console +kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem +``` + +Then, use the following parameters: + +```console +tls.enabled="true" +tls.certificatesSecret="certificates-tls-secret" +tls.certFilename="cert.pem" +tls.certKeyFilename="cert.key" +tls.certCAFilename="ca.pem" +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +If you have enabled TLS by specifying `tls.enabled=true` you also need to specify TLS option to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example: + +You can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification or providing the following values under `metrics.extraArgs` for TLS client authentication: + +```console +tls-client-key-file +tls-client-cert-file +tls-ca-cert-file +``` + +### Host Kernel Settings + +RedisTM may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. 
+To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example: + +``` +sysctlImage: + enabled: true + mountHostSys: true + command: + - /bin/sh + - -c + - |- + install_packages procps + sysctl -w net.core.somaxconn=10000 + echo never > /host-sys/kernel/mm/transparent_hugepage/enabled +``` + +Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example: + +```yaml +securityContext: + sysctls: + - name: net.core.somaxconn + value: "10000" +``` + +Note that this will not disable transparent huge tables. + +## Persistence + +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. + +### Existing PersistentVolumeClaim + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/redis +``` + +## Backup and restore + +### Backup + +To perform a backup you will need to connect to one of the nodes and execute: + +```bash +$ kubectl exec -it my-redis-master-0 bash + +$ redis-cli +127.0.0.1:6379> auth your_current_redis_password +OK +127.0.0.1:6379> save +OK +``` + +Then you will need to get the created dump file form the redis node: + +```bash +$ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis +``` + +### Restore + +To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume. + +Follow the following steps: + +- First you will need to set in the `values.yaml` the parameter `appendonly` to `no`, if it is already `no` you can skip this step. 
+ +```yaml +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly no + # Disable RDB persistence, AOF persistence already enabled. + save "" +``` + +- Start the new cluster to create the PVCs. + +For example, : + +```bash +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +- Now that the PVC were created, stop it and copy the `dump.rdp` on the persisted data by using a helping pod. + +``` +$ helm delete new-redis + +$ kubectl run --generator=run-pod/v1 -i --rm --tty volpod --overrides=' +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "redisvolpod" + }, + "spec": { + "containers": [{ + "command": [ + "tail", + "-f", + "/dev/null" + ], + "image": "bitnami/minideb", + "name": "mycontainer", + "volumeMounts": [{ + "mountPath": "/mnt", + "name": "redisdata" + }] + }], + "restartPolicy": "Never", + "volumes": [{ + "name": "redisdata", + "persistentVolumeClaim": { + "claimName": "redis-data-new-redis-master-0" + } + }] + } +}' --image="bitnami/minideb" + +$ kubectl cp dump.rdb redisvolpod:/mnt/dump.rdb +$ kubectl delete pod volpod +``` + +- Start again the cluster: + +``` +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +## NetworkPolicy + +To enable network policy for RedisTM, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to RedisTM. 
This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 11.0.0 + +When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then a restore the data in this new version. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: + +- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +- Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. 
This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the RedisTM Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + +- Recommended: Create a clone of the RedisTM Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release bitnami/redis --set persistence.existingClaim= + ``` + +- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the RedisTM Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install bitnami/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + +- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) +- `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. 
In case you want to use RedisTM Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes + +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. 
+ +In order to upgrade, delete the RedisTM StatefulSet before upgrading: + +```bash +kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` + +And edit the RedisTM slave (and metrics if enabled) deployment: + +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Upgrading + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. 
The different fields present in the *Chart.yaml* file have been ordered alphabetically
+ +This version also allows enabling RedisTM Sentinel containers inside of the RedisTM Pods (feature disabled by default). In case the master crashes, a new RedisTM node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/Chart.yaml new file mode 100644 index 0000000..ceb5648 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.3.3 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.3.3 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/README.md b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/README.md new file mode 100644 index 0000000..461fdc9 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/README.md @@ -0,0 +1,316 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. 
If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|--------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| 
`common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------|----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|-------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. 
See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for RedisTM are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. 
To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..1ff26d5 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
}} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} 
+ topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..d95b569 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl new file mode 100644 index 0000000..aafde9f --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} 
+ {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..622ef50 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,42 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if typeIs "int" .servicePort }} + number: {{ .servicePort }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4931d94 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,127 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. 
+ +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- $name = .name -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..77bcc2b --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a786188 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..3e2a47c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,72 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis(TM) required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $existingSecret := include "common.redis.values.existingSecret" . -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . 
-}} + {{- $valueKeyRedisPassword := printf "%s%s" $valueKeyPrefix "password" -}} + {{- $valueKeyRedisUsePassword := printf "%s%s" $valueKeyPrefix "usePassword" -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $usePassword := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUsePassword "context" .context) -}} + {{- if eq $usePassword "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Redis Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.redis.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Redis(TM) is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.redis.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..fb2fe60 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. 
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/default-values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..7efeda3 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,682 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000 # docker.io + repository: redis # bitnami/redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-sentinel # bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + service: + ## Redis(TM) Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + service: + ## Redis(TM) Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: {} + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + service: + ## Redis(TM) Slave Service type + type: ClusterIP + ## Redis(TM) port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + extraFlags: [] + ## List of Redis(TM) commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, 
e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-exporter # bitnami/redis-exporter + tag: 1.5.3-debian-10-r14 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . 
}}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/NOTES.txt b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..a254f58 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/NOTES.txt @@ -0,0 +1,136 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + 
+ By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis(TM) service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if and .Values.sentinel.enabled (not .Values.cluster.enabled)}} + +------------------------------------------------------------------------------- + WARNING + + Using redis sentinel without a cluster is not supported. A single pod with + standalone redis has been deployed. + + To deploy redis sentinel, please use the values "cluster.enabled=true" and + "sentinel.enabled=true". + +------------------------------------------------------------------------------- +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.imxc.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis(TM) Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.imxc.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.imxc.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . 
}}-master.imxc.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace imxc {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis(TM) server: + +1. Run a Redis(TM) pod that you can use as a client: + +{{- if .Values.tls.enabled }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image {{ template "redis.image" . }} --command -- sleep infinity + + Copy your TLS certificates to the pod: + + kubectl cp --namespace imxc /path/to/client.cert {{ template "redis.fullname" . }}-client:/tmp/client.cert + kubectl cp --namespace imxc /path/to/client.key {{ template "redis.fullname" . }}-client:/tmp/client.key + kubectl cp --namespace imxc /path/to/CA.cert {{ template "redis.fullname" . }}-client:/tmp/CA.cert + + Use the following command to attach to the pod: + + kubectl exec --tty -i {{ template "redis.fullname" . }}-client \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --namespace imxc -- bash +{{- else }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash +{{- end }} + +2. Connect using the Redis(TM) CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . 
}} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . 
}}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} + +{{- include "redis.validateValues" . }} \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/_helpers.tpl b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..193105d --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/_helpers.tpl @@ -0,0 +1,421 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "redis.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "redis.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "redis.tlsCACert" -}} +{{- required "Certificate CA filename is required when TLS in enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the DH params file. +*/}} +{{- define "redis.tlsDHParams" -}} +{{- if .Values.tls.dhParamsFilename -}} +{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis(TM) secret. 
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis(TM) password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}}
+{{- if .Values.global -}}
+ {{- if .Values.global.storageClass -}}
+ {{- if (eq "-" .Values.global.storageClass) -}}
+ {{- printf "storageClassName: \"\"" -}}
+ {{- else }}
+ {{- printf "storageClassName: %s" .Values.global.storageClass -}}
+ {{- end -}}
+ {{- else -}}
+ {{- if .Values.slave.persistence.storageClass -}}
+ {{- if (eq "-" .Values.slave.persistence.storageClass) -}}
+ {{- printf "storageClassName: \"\"" -}}
+ {{- else }}
+ {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}}
+ {{- end -}}
+ {{- end -}}
+ {{- end -}}
+{{- else -}}
+ {{- if .Values.slave.persistence.storageClass -}}
+ {{- if (eq "-" .Values.slave.persistence.storageClass) -}}
+ {{- printf "storageClassName: \"\"" -}}
+ {{- else }}
+ {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "redis.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Redis(TM) - spreadConstraints K8s version */}}
+{{- define "redis.validateValues.spreadConstraints" -}}
+{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}}
+redis: spreadConstraints
+ Pod Topology Spread Constraints are only available on K8s >= 1.16
+ Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+{{- end -}}
+{{- end -}}
+
+{{/*
+Renders a value that contains template. 
+Usage: +{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "redis.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml new file mode 100644 index 0000000..02411c8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml @@ -0,0 +1,393 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-scripts + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + echo "I am master" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + else + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + + # Immediately attempt to connect to the reported master. If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. 
The liveness check will then timeout waiting for the redis
+ # container to be ready and restart it. By then the new master will likely have been elected
+ if is_boolean_yes "$REDIS_TLS_ENABLED"; then
+ sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+ else
+ sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+ fi
+
+ if [[ ! ($($sentinel_info_command)) ]]; then
+ # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet
+ # and are reporting the old one still. Once this happens the container will get stuck and never see the new
+ # master. We stop here to allow the container to not pass the liveness check and be restarted.
+ exit 1
+ fi
+ fi
+
+ if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "slave" ]]; then + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + else + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + fi + + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . 
| quote }}) + {{- end }} + {{- end }} + + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" + } + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + REDIS_MASTER_HOST="$(hostname -i)" + REDIS_MASTER_PORT_NUMBER="{{ .Values.redisPort }}" + else + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + # Immediately attempt to connect to the reported master. 
If it doesn't exist the connection attempt will either hang
+ # or fail with "port unreachable" and give no data. The liveness check will then timeout waiting for the sentinel
+ # container to be ready and restart it. By then the new master will likely have been elected
+ if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then
+ sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+ else
+ sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+ fi
+
+ if [[ ! ($($sentinel_info_command)) ]]; then
+ # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet
+ # and are reporting the old one still. Once this happens the container will get stuck and never see the new
+ # master. We stop here to allow the container to not pass the liveness check and be restarted.
+ exit 1
+ fi
+ fi
+ sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}"
+
+ add_replica() {
+ if [[ "$1" != "$REDIS_MASTER_HOST" ]]; then
+ sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $1 {{ .Values.redisPort }}"
+ fi
+ }
+
+ {{- if .Values.sentinel.staticID }}
+ # remove generated known sentinels and replicas
+ tmp="$(sed -e '/^sentinel known-/d' -e '/^$/d' /opt/bitnami/redis-sentinel/etc/sentinel.conf)"
+ echo "$tmp" > /opt/bitnami/redis-sentinel/etc/sentinel.conf
+
+ for node in $(seq 0 {{ .Values.cluster.slaveCount }}); do
+ NAME="{{ template "redis.fullname" . 
}}-node-$node" + IP="$(getent hosts "$NAME.$HEADLESS_SERVICE" | awk ' {print $1 }')" + if [[ "$NAME" != "$HOSTNAME" && -n "$IP" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $IP {{ .Values.sentinel.port }} $(host_id "$NAME")" + add_replica "$IP" + fi + done + add_replica "$(hostname -i)" + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} +{{- else }} + start-master.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if .Values.cluster.enabled }} + start-slave.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} + +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..923272c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{- tpl .Values.configmap . | nindent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{- tpl .Values.master.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{- tpl .Values.slave.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{- tpl .Values.sentinel.configmap . 
| nindent 4 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/headless-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..7db7371 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/health-configmap.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..0bbbfb6 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,176 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . 
}} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . 
}} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..928f9a8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,39 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} +# {{- if .Values.metrics.serviceMonitor.namespace }} +# namespace: {{ .Values.metrics.serviceMonitor.namespace }} +# {{- else }} + namespace: imxc +# {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "metrics" + namespaceSelector: + matchNames: + - imxc +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..4dae3bc --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "metrics" + {{- if .Values.metrics.service.labels -}} + {{- toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{- toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{ if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..ae27ebb --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,74 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/pdb.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/pdb.yaml new file mode 100644 index 0000000..e2ad471 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/pdb.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "redis.fullname" . 
}} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..fba6450 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: imxc + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{- tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/psp.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..f3c9390 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/psp.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..78aa2e6 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,378 @@ +{{- if or (not .Values.cluster.enabled) (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: 
{{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.statefulset.labels }} + {{- toYaml .Values.master.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.master.statefulset.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.annotations | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podLabels }} + {{- toYaml .Values.master.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- toYaml .Values.master.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. 
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.master.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.master.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto "}} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . 
}} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ include "redis.tplValue" (dict "value" .Values.persistence.existingClaim "context" $) }} + {{- end }} + {{- if .Values.master.persistence.volumes }} + {{- toYaml .Values.master.persistence.volumes | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.master.persistence.volumes) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- if .Values.master.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.master.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.master.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.master.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..56ba5f1 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{- toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{- toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{- toYaml . | nindent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml new file mode 100644 index 0000000..5d697de --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml @@ -0,0 +1,494 @@ +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-node + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: node + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: node + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + env: + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.slave.persistence.path }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.sentinel.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ 
.Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if 
and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: {{ template "sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.port | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.sentinel.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.sentinel.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ 
.Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-pv.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-pv.yaml new file mode 100644 index 0000000..adb5416 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-pv.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-master +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-master-0 + namespace: imxc + 
hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-0 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-0 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-1 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-1 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-role.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..0d14129 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{- toYaml .Values.rbac.role.rules | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..83c87f5 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . 
}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..9452003 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..be0894b --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,384 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName | quote }} + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.slave.shareProcessNamespace }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-slave.sh + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . 
}}-headless.imxc.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml 
.Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..c1f3ae5 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{- toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: {{- toYaml .Values.slave.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{ if eq .Values.slave.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.slave.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..3b3458e --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{- toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: {{- toYaml .Values.sentinel.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }} + {{- end }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/secret.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..c1103d2 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . 
| b64enc | quote }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.schema.json b/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.schema.json new file mode 100644 index 0000000..3188d0c --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } 
+ } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "slave/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.yaml 
b/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.yaml new file mode 100644 index 0000000..fcd8710 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/redis/values.yaml @@ -0,0 +1,932 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: latest + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +fullnameOverride: redis + +## Cluster settings +## +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +## +sentinel: + enabled: false + #enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + ## + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + #registry: docker.io + registry: 10.10.31.243:5000 + repository: bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.10-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + ## + service: + ## Redis(TM) Sentinel Service type + ## + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + + ## Additional commands to run prior to starting Redis(TM) node with sentinel + ## + preExecCmds: "" + + ## An array to add extra env var to the sentinel node configurations + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + #enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fullname template + ## + name: + ## Add annotations to service account + # annotations: + # iam.gke.io/gcp-service-account: "sa@project.iam.gserviceaccount.com" + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +## +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +## +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "dkagh1234!" 
+## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +## +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. + authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + ## + service: + ## Redis(TM) Master Service type + ## + type: ClusterIP + # type: NodePort + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + 
externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31379 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + volumes: + # - name: volume_name + # emptyDir: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: '' + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + ## + service: + ## Redis(TM) Slave Service type + ## + type: ClusterIP + #type: NodePort + ## Redis(TM) port + ## + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31380 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + ## + port: 6379 + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + ## + extraFlags: [] + ## List of Redis(TM) commands to disable + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: '' + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false +# enabled: true + + image: + registry: 10.10.31.243:5000 # registry.cloud.intermax:5000 + repository: redis/redis-exporter + #tag: 1.15.1-debian-10-r2 + tag: latest + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + relabelings: [] + + ## MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + metricRelabelings: [] + # - sourceLabels: + # - "__name__" + # targetLabel: "__name__" + # action: replace + # regex: '(.*)' + # replacement: 'example_prefix_$1' + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend 
on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+ rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: '' + service: + type: ClusterIP + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false + ## + securityContext: + runAsUser: 0 + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/.helmignore b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/Chart.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000..c9a2bfb --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: zookeeper +version: 0.1.0 diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml new file mode 100644 index 0000000..3b23a9e --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml @@ -0,0 +1,35 @@ +kind: ConfigMap +metadata: + name: zookeeper-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + [ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data + [ -z "$ID_OFFSET" ] && ID_OFFSET=1 + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET)) + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid + cp -Lur /etc/kafka-configmap/* /etc/kafka/ + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties + zookeeper.properties: |- + tickTime=2000 + dataDir=/var/lib/zookeeper/data + dataLogDir=/var/lib/zookeeper/log + clientPort=2181 + maxClientCnxns=1 + initLimit=5 + syncLimit=2 + server.1=zookeeper-0.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + server.2=zookeeper-1.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + server.3=zookeeper-2.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + log4j.properties: |- + log4j.rootLogger=INFO, stdout 
+ log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + # Suppress connection log messages, three lines per livenessProbe execution + log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN + log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml new file mode 100644 index 0000000..422433a --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-headless + namespace: imxc +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + storage: persistent + diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml new file mode 100644 index 0000000..9fdcf95 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml @@ -0,0 +1,12 @@ +# the headless service is for PetSet DNS, this one is for clients +apiVersion: v1 +kind: Service +metadata: + name: zookeeper + namespace: imxc +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml new file mode 100644 index 0000000..2a909f7 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml @@ -0,0 
+1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-1 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-2 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-3 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml new file mode 100644 index 0000000..a9e5cb8 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: zookeeper + namespace: imxc +spec: 
+ selector: + matchLabels: + app: zookeeper + storage: persistent + serviceName: "zookeeper-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + storage: persistent + annotations: + spec: + terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + containers: + - name: zookeeper + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 200Mi + limits: + cpu: 200m + memory: 500Mi + env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + command: + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election +# readinessProbe: +# exec: +# command: +# - /bin/sh +# - -c +# - '[ "imok" = "$(echo ruok | nc -w 1 -q 1 127.0.0.1 2181)" ]' + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + volumes: + - name: configmap + configMap: + name: zookeeper-config + - name: config + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: zookeeper-storage + resources: + requests: + storage: 30Gi diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml 
b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml new file mode 100644 index 0000000..e08ed54 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml @@ -0,0 +1,50 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-2 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/values.yaml b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/values.yaml new file mode 100644 index 0000000..7b06985 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/charts/zookeeper/values.yaml @@ -0,0 +1,68 @@ +# Default values for zookeeper. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_demo_install/files/02-base/base/index.yaml b/roles/cmoa_demo_install/files/02-base/base/index.yaml new file mode 100644 index 0000000..62a41a3 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/index.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +entries: {} +generated: "2019-11-05T09:47:03.285264152+09:00" diff --git a/roles/cmoa_demo_install/files/02-base/base/templates/role.yaml b/roles/cmoa_demo_install/files/02-base/base/templates/role.yaml new file mode 100644 index 0000000..28f0e32 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/templates/role.yaml @@ -0,0 +1,16 @@ +kind: ClusterRoleBinding +{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} +metadata: + name: imxc-cluster-admin-clusterrolebinding +subjects: +- 
kind: ServiceAccount + name: default + namespace: imxc +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/roles/cmoa_demo_install/files/02-base/base/values.yaml b/roles/cmoa_demo_install/files/02-base/base/values.yaml new file mode 100644 index 0000000..e2ad288 --- /dev/null +++ b/roles/cmoa_demo_install/files/02-base/base/values.yaml @@ -0,0 +1,73 @@ +global: + # cluster variables + CLUSTER_ID: cloudmoa + + # default storageClass + DEFAULT_STORAGE_CLASS: exem-local-storage + + # nodeAffinity + affinity_key: cmoa + affinity_value1: worker1 + affinity_value2: worker2 + affinity_value3: worker2 + + # postgres variables + IMXC_POSTGRES_PV_PATH: /media/data/postgres/postgres-data-0 + + #elastic variables + ELASTICSEARCH_PATH1: /media/data/elasticsearch/elasticsearch-data-0 + ELASTICSEARCH_PATH2: /media/data/elasticsearch/elasticsearch-data-1 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + # zookeeper variables + IMXC_ZOOKEEPER_PATH1: /media/data/zookeeper/zookeeper-data-0 + IMXC_ZOOKEEPER_PATH2: /media/data/zookeeper/zookeeper-data-1 + IMXC_ZOOKEEPER_PATH3: /media/data/zookeeper/zookeeper-data-2 + + # kafka variables + IMXC_KAFKA_PV_PATH1: /media/data/kafka/kafka-data-0 + IMXC_KAFKA_PV_PATH2: /media/data/kafka/kafka-data-1 + IMXC_KAFKA_PV_PATH3: /media/data/kafka/kafka-data-2 + KAFKA_BROKER_CONFIG: "{{index .metadata.labels \"failure-domain.beta.kubernetes.io/zone\"}}" + + # cortex variables + IMXC_INGESTER_PV_PATH1: /media/cloudmoa/ingester/ingester-data-1 + IMXC_INGESTER_PV_PATH2: /media/cloudmoa/ingester/ingester-data-2 + IMXC_INGESTER_PV_PATH3: /media/cloudmoa/ingester/ingester-data-3 + + # redis variables + IMXC_REDIS_PV_PATH1: /media/data/redis/redis-data-0 + IMXC_REDIS_PV_PATH2: /media/data/redis/redis-data-1 + IMXC_REDIS_PV_PATH3: /media/data/redis/redis-data-2 + + # rabbitmq variables + RABBITMQ_PATH: /media/data/rabbitmq + + # custom or etc variables + # IMXC_WORKER_NODE_NAME: $IMXC_WORKER_NODE_NAME # 
deprecated 2021.10.21 + # IMXC_MASTER_IP: 10.10.30.202 + IMXC_API_SERVER_DNS: imxc-api-service + + METRIC_ANALYZER_MASTER_VERSION: rel0.0.0 + METRIC_ANALYZER_WORKER_VERSION: rel0.0.0 + ELASTICSEARCH_VERSION: v1.0.0 + KAFKA_MANAGER_VERSION: v1.0.0 + KAFKA_INITUTILS_VERSION: v1.0.0 + #KAFKA_VERSION: v1.0.0 + KAFKA_VERSION: v1.0.1 + METRICS_SERVER_VERSION: v1.0.0 + POSTGRES_VERSION: v1.0.0 + CASSANDRA_VERSION: v1.0.0 + RABBITMQ_VERSION: v1.0.0 + CORTEX_VERSION: v1.11.0 #v1.9.0 + #CONSUL_VERSION: 0.7.1 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + rabbitmq: + image: + registry: 10.10.31.243:5000/cmoa3 # {{ .Values.global.IMXC_REGISTRY }} + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh new file mode 100755 index 0000000..b3a27ed --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh @@ -0,0 +1,3085 @@ +#!/bin/sh + +#!/bin/bash + +namespace=$1 +export ES_NODEPORT=`kubectl -n ${namespace} get svc elasticsearch -o jsonpath='{.spec.ports[*].nodePort}'` + +export MASTER_IP=`kubectl get node -o wide | grep control-plane | awk '{print $6}'` + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SECURE=true + +if [ $SECURE = true ] +then +PARAM="-u elastic:elastic --insecure" +PROTO="https" +else +PARAM="" +PROTO="http" +fi + +echo Secure=$SECURE +echo Param=$PARAM +echo Proto=$PROTO + +curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices + +echo "curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices" + +# kubernetes_cluster_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_info" + ], + "settings": { + "index": { + "number_of_shards": 
'""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "date": { + "type": "long" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + } +}' + +# kubernetes_cluster_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cluster_history" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_cluster_history": {} + } +}' + +# kubernetes_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { 
+ "name": "kubernetes_info" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "id": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_info": {} + } +}' + + + +# kubernetes_event_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_event_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_event_info" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + 
"aliases": { + "kubernetes_event_info": {} + } +}' + + + + +# kubernetes_job_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_job_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_job_info" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "commandlist": { + "type": "text", + "index": false + }, + "labellist": { + "type": "text", + "index": false + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_job_info": {} + } +}' + + + +# kubernetes_cronjob_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cronjob_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cronjob_info" + } + } + }, + 
"mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "jobname": { + "type": "keyword" + }, + "kind": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "lastruntime": { + "type": "long" + }, + "arguments": { + "type": "text", + "index": false + }, + "schedule": { + "type": "keyword" + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_cronjob_info": {} + } +}' + + + + +# kubernetes_network_connectivity +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_network_connectivity-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_network_connectivity" + } + } + }, + "mappings": { + "properties": { + "timestamp": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "container": { + "type": "keyword" + }, + "pid": { + "type": "integer" + }, + "peerNode": { + "type": "keyword" + }, + "peerNamespace": { + "type": "keyword" + }, + "peerService": { + "type": "keyword" + }, + "peerPod": { + "type": "keyword" + }, + "peerContainer": { + "type": "keyword" + }, + "peerPid": { + "type": "integer" + } + } + }, + 
"aliases": { + "kubernetes_network_connectivity": {} + } +}' + + + +# sparse_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sparse_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "sparse_log" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "date": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "logpath": { + "type": "text", + "index": false + }, + "contents": { + "type": "text" + }, + "lineNumber": { + "type": "integer" + }, + "probability": { + "type": "float" + }, + "subentityId": { + "type": "keyword" + } + } + }, + "aliases": { + "sparse_log": {} + } +}' + + + +# sparse_model +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_model' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_model" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "modifiedDate": { + "type": "long" + }, + "logPath": { + "type": "keyword" + }, + "savedModel": { + "type": "text", + "index": false + } + } + } +}' + + + +# 
kubernetes_pod_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ +"order": 0, + "index_patterns": [ + "kubernetes_pod_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_info" + } + } + }, + "mappings": { + "properties": { + "eventType": {"type": "keyword"}, + "cluster": {"type": "keyword"}, + "namespace": {"type": "keyword"}, + "node": {"type": "keyword"}, + "pod": {"type": "keyword"}, + "podUID": {"type": "keyword"}, + "podCreationTimestamp": {"type": "long"}, + "podDeletionTimestamp": {"type": "long"}, + "podDeletionGracePeriod": {"type": "long"}, + "resourceVersion": {"type": "keyword"}, + "ownerKind": {"type": "keyword"}, + "ownerName": {"type": "keyword"}, + "ownerUID": {"type": "keyword"}, + "podPhase": {"type": "keyword"}, + "podIP": {"type": "keyword"}, + "podStartTime": {"type": "long"}, + "podReady": {"type": "boolean"}, + "podContainersReady": {"type": "boolean"}, + "isInitContainer": {"type": "boolean"}, + "containerName": {"type": "keyword"}, + "containerID": {"type": "keyword"}, + "containerImage": {"type": "keyword"}, + "containerImageShort": {"type": "keyword"}, + "containerReady": {"type": "boolean"}, + "containerRestartCount": {"type": "integer"}, + "containerState": {"type": "keyword"}, + "containerStartTime": {"type": "long"}, + "containerMessage": {"type": "keyword"}, + "containerReason": {"type": "keyword"}, + "containerFinishTime": {"type": "long"}, + "containerExitCode": {"type": "integer"}, + "containerLastState": {"type": "keyword"}, + 
"containerLastStartTime": {"type": "long"}, + "containerLastMessage": {"type": "keyword"}, + "containerLastReason": {"type": "keyword"}, + "containerLastFinishTime": {"type": "long"}, + "containerLastExitCode": {"type": "integer"} + } + }, + "aliases": { + "kubernetes_pod_info": {} + } +}' + + + +# kubernetes_pod_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_pod_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_history" + } + } + }, + "mappings": { + "properties": { + "deployName": { + "type": "keyword" + }, + "deployType": { + "type": "keyword" + }, + "deployDate": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "podPhase": { + "type": "keyword" + }, + "startTime": { + "type": "keyword" + }, + "endTime": { + "type": "keyword" + }, + "exitCode": { + "type": "integer" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "time": { + "type": "long" + }, + "containerId": { + "type": "keyword" + }, + "containerName": { + "type": "keyword" + }, + "containerPhase": { + "type": "keyword" + }, + "eventAction": { + "type": "keyword" + }, + "containerStartTime": { + "type": "keyword" + }, + "containerEndTime": { + "type": "keyword" + }, + "containerImage": { + "type": "keyword" + }, + "containerImageShort": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_pod_history": {} + } 
+}' + + + + +# metric_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/metric_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/metric_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "metric_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "metric_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "anomaly": { + "type": "boolean" + }, + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "instance": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "metricId": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "score": { + "type": "integer" + }, + "subKey": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "yhatLowerUpper": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "aliases": { + "metric_score": {} + } +}' + + + + +# entity_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/entity_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/entity_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "entity_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + 
"number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "entity_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "nodeId": { + "type": "keyword" + }, + "maxId": { + "type": "keyword" + }, + "maxScore": { + "type": "integer" + }, + "entityScore": { + "type": "integer" + } + } + }, + "aliases": { + "entity_score": {} + } +}' + + +# timeline_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/timeline_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/timeline_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "timeline_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "timeline_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "criticalCount": { + "type": "integer" + }, + "warningCount": { + "type": "integer" + }, + "attentionCount": { + "type": "integer" + }, + "normalCount": { + "type": "integer" + }, + "unixtime": { + "type": "long" + } + } + }, + "aliases": { + "timeline_score": {} + } +}' + + + +# spaninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/spaninfo' -H 'Content-Type: 
application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/spaninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "spaninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "spaninfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "spanId": { + "type": "keyword" + }, + "parentSpanId": { + "type": "keyword" + }, + "protocolType": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "operation": { + "type": "keyword" + }, + "spanKind": { + "type": "keyword" + }, + "component": { + "type": "keyword" + }, + "error": { + "type": "boolean" + }, + "peerAddress": { + "type": "keyword" + }, + "peerHostname": { + "type": "keyword" + }, + "peerIpv4": { + "type": "keyword" + }, + "peerIpv6": { + "type": "keyword" + }, + "peerPort": { + "type": "integer" + }, + "peerService": { + "type": "keyword" + }, + "samplingPriority": { + "type": "keyword" + }, + "httpStatusCode": { + "type": "integer" + }, + "httpUrl": { + "type": "keyword" + }, + "httpMethod": { + "type": "keyword" + }, + "httpApi": { + "type": "keyword" + }, + "dbInstance": { + "type": "keyword" + }, + "dbStatement": { + "type": "keyword" + }, + "dbType": { + "type": "keyword" + }, + "dbUser": { + "type": "keyword" + }, + "messagebusDestination": { + "type": "keyword" + }, + "logs": { + "dynamic": false, + "type": "nested", + 
"properties": { + "fields": { + "dynamic": false, + "type": "nested", + "properties": { + "value": { + "ignore_above": 256, + "type": "keyword" + }, + "key": { + "type": "keyword" + } + } + }, + "timestamp": { + "type": "long" + } + } + } + } + }, + "aliases": { + "spaninfo": {} + } +}' + + + +# sta_podinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_podinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_podinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "version": { + "type": "keyword" + }, + "components": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + } + } + }, + "aliases": { + "sta_podinfo": {} + } +}' + + +# sta_httpapi +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpapi-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpapi" + } + } + }, + 
"mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "api": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_httpapi": {} + } +}' + + + +# sta_httpsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpsummary" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "pod": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "api": { + "type": "keyword" + }, + "countTotal": { + "type": "integer" + }, + "errorCountTotal": { + "type": "integer" + }, + "timeTotalMicrosec": { + "type": "integer" + }, + "methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_httpsummary": {} + } +}' + + + +# sta_relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_relation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_relation' -H 'Content-Type: application/json' -d 
'{ + "order": 0, + "index_patterns": [ + "sta_relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_relation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "parent": { + "type": "keyword" + }, + "children": { + "type": "nested", + "properties": { + "name": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_relation": {} + } +}' + + + +# sta_externalrelation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_externalrelation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "externalNamespace": { + "type": "keyword" + }, + "externalService": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_externalrelation": {} + } +}' + + + +# sta_traceinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_traceinfo' -H 
'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_traceinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_traceinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "operationName": { + "type": "keyword" + }, + "spanSize": { + "type": "integer" + }, + "relatedServices": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "error": { + "type": "boolean" + } + } + }, + "aliases": { + "sta_traceinfo": {} + } +}' + + + +# sta_tracetrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_tracetrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_tracetrend" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": {"type": "integer"} + } + }, + { + "errors": { + "match": "error*", + "mapping": {"type": "integer"} + } + } + ] + }, + "aliases": { + 
"sta_tracetrend": {} + } +}' + +# script_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/script_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + + + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/script_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "script_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "script_history" + } + } + }, + "mappings": { + "properties": { + "taskId": { + "type": "long" + }, + "scriptName": { + "type": "keyword" + }, + "agentName": { + "type": "keyword" + }, + "targetFile": { + "type": "keyword" + }, + "args": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "validCmd": { + "type": "keyword" + }, + "validVal": { + "type": "keyword" + }, + "valid": { + "type": "boolean" + }, + "validResult": { + "type": "keyword" + }, + "cronExp": { + "type": "keyword" + }, + "createUser": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "error": { + "type": "boolean" + }, + "result": { + "type": "keyword" + }, + "order": { + "type": "keyword" + }, + "mtime": { + "type": "keyword" + } + } + }, + "aliases": { + "script_history": {} + } +}' + + +# kubernetes_audit_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + 
"kubernetes_audit_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_audit_log" + }, + "sort.field": "stageTimestamp", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "verb": { + "type": "keyword" + }, + "userName": { + "type": "keyword" + }, + "sourceIps": { + "type": "keyword" + }, + "resource": { + "type": "keyword" + }, + "code": { + "type": "keyword" + }, + "requestReceivedTimestamp": { + "type": "long" + }, + "stageTimestamp": { + "type": "long" + }, + "durationTimestamp": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_audit_log": {} + } +}' + +# license_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/license_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/license_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "license_history-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "license_history" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": 
"integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "license_history": {} + } +}' + +# alert_event_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/alert_event_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/alert_event_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "alert_event_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "alert_event_history" + } + } + }, + "mappings": { + "properties": { + "alertName": { + "type": "keyword" + }, + "clusterId": { + "type": "keyword" + }, + "data": { + "type": "text", + "index": false + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "level": { + "type": "keyword" + }, + "metaId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "startsAt": { + "type": "long" + }, + "threshold": { + "type": "double" + }, + "value": { + "type": "double" + }, + "message": { + "type": "keyword" + }, + "endsAt": { + "type": "long" + }, + "status": { + "type": "keyword" + }, + "hookCollectAt": { + "type": "long" + } + } + }, + "aliases": { + "alert_event_history": {} + } +}' + +# JSPD ilm +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/jspd_ilm' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +# jspd_lite-activetxn +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-activetxn' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-activetxn-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "cpu_time": { + "type": "integer" + }, + "memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_exec_count": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "active_sql_elapse_time": { + "type": "integer" + }, + "db_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "thread_id": { + "type": "long" + }, + "state": { + "type": "short" + }, + "method_id": { + "type": "integer" + }, + "method_seq": { + "type": "integer" + }, + "stack_crc": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-activetxn": {} + } +}' + +# jspd_lite-alert +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-alert' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-alert-*" + ], + "settings": { + "index": { + "number_of_shards": 
'""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "status": { + "type": "short" + }, + "value": { + "type": "integer" + }, + "pid": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-alert": {} + } +}' + +# jspd_lite-e2einfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-e2einfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-e2einfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "root_tid": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "e2e_info_type": { + "type": "short" + }, + "e2e_key": { + "type": "keyword" + }, + "elapse_time": { + "type": "integer" + }, + "dest_url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-e2einfo": {} + } +}' + +# jspd_lite-methodname +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-methodname' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-methodname-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + 
"number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "method_id": { + "type": "integer" + }, + "class_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "method_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-methodname": {} + } +}' + +# jspd_lite-sqldbinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-sqldbinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-sqldbinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-sqldbinfo": {} + } +}' + +# jspd_lite-txninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + 
"refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "end_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "keyword" + }, + "client_ip": { + "type": "keyword" + }, + "exception": { + "type": "short" + }, + "thread_cpu_time": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "open_conn": { + "type": "integer" + }, + "close_conn": { + "type": "integer" + }, + "open_stmt": { + "type": "integer" + }, + "close_stmt": { + "type": "integer" + }, + "open_rs": { + "type": "integer" + }, + "close_rs": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_execute_count": { + "type": "integer" + }, + "sql_elapse_time": { + "type": "integer" + }, + "sql_elapse_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + }, + "txn_flag": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + }, + "http_status": { + "type": "integer" + }, + "duration": { + "type": "long" + } + } + }, + "aliases": { + "jspd_lite-txninfo": {} + } +}' + +# jspd_lite-txnmethod +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnmethod' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnmethod-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + 
"mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "method_seq": { + "type": "integer" + }, + "method_id": { + "type": "integer" + }, + "calling_method_id": { + "type": "integer" + }, + "stack_crc32": { + "type": "integer" + }, + "calling_stack_crc32": { + "type": "integer" + }, + "elapse_time": { + "type": "integer" + }, + "exec_count": { + "type": "integer" + }, + "error_count": { + "type": "integer" + }, + "cpu_time": { + "type": "integer" + }, + "memory": { + "type": "integer" + }, + "start_time": { + "type": "long" + }, + "method_depth": { + "type": "integer" + }, + "exception": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 32768, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-txnmethod": {} + } +}' + +# jspd_lite-txnsql +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnsql' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnsql-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "cursor_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": 
"keyword" + } + } + }, + "method_id": { + "type": "integer" + }, + "execute_count": { + "type": "integer" + }, + "elapsed_time": { + "type": "integer" + }, + "elapsed_time_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "fetch_time_max": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-txnsql": {} + } +}' + +# jspd_lite-wasstat +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-wasstat' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-wasstat-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "active_txns": { + "type": "integer" + }, + "sql_exec_count": { + "type": "long" + }, + "sql_prepare_count": { + "type": "long" + }, + "sql_fetch_count": { + "type": "long" + }, + "txn_end_count": { + "type": "long" + }, + "open_file_count": { + "type": "integer" + }, + "close_file_count": { + "type": "integer" + }, + "open_socket_count": { + "type": "integer" + }, + "close_socket_count": { + "type": "integer" + }, + "txn_elapse": { + "type": "long" + }, + "sql_elapse": { + "type": "long" + }, + "txn_elapse_max": { + "type": "long" + }, + "sql_elapse_max": { + "type": "long" + }, + "txn_error_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-wasstat": {} + } +}' + +# jspd_tta-externalrelation +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "external_namespace": { + "type": "keyword" + }, + "external_service": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-externalrelation": {} + } +}' + +# jspd_tta-relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "from_service": { + "type": "keyword" + }, + "to_service": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_tta-relation": {} + } +}' + +# jspd_tta-txnlist +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnlist' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnlist-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + 
}, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-txnlist": {} + } +}' + +# jspd_tta-txnsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + }, + "req_count": { + "type": "integer" + }, + "resp_count": { + "type": "integer" + }, + "total_duration": { + "type": "long" + }, + "failed": { + "type": "integer" + }, + "http_methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "http_statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "jspd_tta-txnsummary": {} + } +}' + +# jspd_tta-txntrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txntrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txntrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + 
"pod": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": { + "type": "integer" + } + } + }, + { + "errors": { + "match": "error*", + "mapping": { + "type": "integer" + } + } + } + ] + }, + "aliases": { + "jspd_tta-txntrend": {} + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "5d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "maximum_metrics" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "maximum_metrics" + }, + "sort.field": "date", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "kind": { + "type": "keyword" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entity": { + "type": "keyword" + }, + "maximum": { + "type": "float" + }, + "date": { + "type": "date", + "format": "yyyy-MM-dd" + } + } + } +}' diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export 
ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 
+curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh new file 
mode 100644 index 0000000..a9c833c --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh 
b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + 
"tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh 
b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export 
ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복제가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복제 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl 
get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } 
+ } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh 
b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP 
+ +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git 
a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": 
"integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh 
b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo 
$ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git 
a/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복제가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복제 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jspd_menumeta.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+ +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d 
'{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + 
"'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) 
+echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' 
-d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export 
NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + 
"order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk 
'{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": 
"'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 
스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. +-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 
'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. + +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql new file mode 100644 index 0000000..7ed34ad --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql @@ -0,0 +1,803 @@ +UPDATE public.metric_meta2 SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) 
((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)' WHERE id = 'container_memory_usage_by_workload'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: 
cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP' WHERE id = 7; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + 
securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' WHERE id = 4; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + 
resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: 
$COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "INFO" + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + 
+ - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 6; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + 
description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 
''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id 
+ - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 3; \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql new file mode 100644 index 0000000..6b63e62 --- /dev/null +++ 
b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql @@ -0,0 +1,919 @@ + +-- from diff + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- cortex alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config +( + id bigint not null, + cluster_id varchar, + resolve_timeout varchar, + receiver varchar, + group_by varchar, + group_wait varchar, + group_interval varchar, + repeat_interval varchar, + routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + + + +alter table tenant_info + add delete_scheduler_date timestamp; + +alter table tenant_info + add tenant_init_clusters varchar(255); + +alter table cloud_user + add dormancy_date timestamp; + +alter table cloud_user + add status varchar(255) default 'use'::character varying not null; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check|Check Script'; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check'; + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, 
null); + +UPDATE public.menu_meta +SET position = 10::integer +WHERE id = 80::bigint; + +UPDATE public.menu_meta +SET position = 99::integer +WHERE id = 90::bigint; + + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info 
(config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"', true); + + + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character varying(255) NOT NULL, + description text, + code_type character varying(255), + input_type 
character varying(255), + input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by TRX_NAME_TYPE (parameter(1), param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. 
blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop 
values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects 
allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop 
values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); + + +---public.metric_meta2 +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}} + node_memory_SReclaimable_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024'::text WHERE id LIKE 'node#_memory#_used' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100'::text WHERE id LIKE 'host#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = 'sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))'::text WHERE id LIKE 'host#_fs#_total#_by#_mountpoint' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(1- avg by (xm_clst_id) 
(((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100'::text WHERE id LIKE 'cluster#_memory#_usage' ESCAPE '#'; + + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - (node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}} + node_memory_SReclaimable_bytes{xm_entity_type=''Node'', {filter}})) >= 0 or (node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}})) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} * 100'::text WHERE id LIKE 'node#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})'::text WHERE id LIKE 'host#_memory#_used' ESCAPE '#'; + + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_error_rate', 'Service Pod Transaction Error Rate', 'The number of transaction error rate for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum 
by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.', '2022-02-15 18:08:58.18', '2022-02-15 18:08:58.18'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_rate', 'Service Transaction Error Rate', 'Service Transaction Error Rate', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.', '2022-02-15 14:33:00.118', '2022-02-15 15:40:17.64'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_elapsed_time_avg', 'Service Transaction Elapsed Time (avg)', 'Service Average Elapsed Time', 'sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', 
'2021-11-15 16:09:34.233', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_elapsed_time_avg', 'Service Pod Transaction Elapsed Time (avg)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Pod Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2022-02-15 18:04:55.228', '2022-02-15 18:04:55.228'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_count', 'Service Transaction Error Count', 'Service Transaction Error Count', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) ', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Error Request count:{{humanize $value}}%|{threshold}%.', '2021-11-15 16:10:31.352', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_per_sec', 'Service Transaction Count (per Second)', 'Service Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 't', 
'SVC:{{$labels.xm_service_name}} Svc Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2021-11-15 16:11:19.606', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_per_sec', 'Service Pod Transaction Count (per sec)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-02-15 17:59:39.45', '2022-02-15 17:59:39.45'); + + + +-- Auto-generated SQL script #202202221030 +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_system_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_system_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) 
group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_user_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_user_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824' + WHERE 
id='container_fs_limit_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_reads_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)' + WHERE 
id='container_fs_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_writes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_cache_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_max_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_swap_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) 
(container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100)' + WHERE id='container_memory_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE 
id='container_memory_working_set_bytes_by_workload'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_active_txn_per_sec', 'Service Active Transaction Count (per Second)', 'Service Active Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:51:45.946', '2022-03-11 15:51:45.946') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))' +WHERE id = 'imxc_jspd_active_txn_per_sec'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_pod_active_txn_per_sec', 'Service Pod Active Transaction Count (per sec)', 'The number of active transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:53:29.252', '2022-03-11 15:53:29.252') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))' +WHERE id = 'imxc_jspd_pod_active_txn_per_sec'; + + +--public.agent_install_file_info + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes 
+ verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + 
apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume 
+ hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + 
action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node 
+ metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: 
$CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: 
''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql new file mode 100644 index 0000000..e84e9be --- /dev/null +++ 
b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql @@ -0,0 +1,459 @@ + UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - 
''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent + spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: 
ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent + spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config + ' WHERE id = 6; \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql new file mode 100644 index 0000000..0d20f2c --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql @@ -0,0 +1,1379 @@ +CREATE TABLE 
public.cloud_user_setting ( + user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +-- 더존(3.3.2) 에서 누락되었던 항목 모두 추가 +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, 
code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting 
(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search amomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', 
'2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'trace-agent', 'trace agent deployment name', null, 
'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'imxc-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values 
('Api Server', 'imxc-api-demo', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui-demo', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream Txntrend', 'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +UPDATE 
public.agent_install_file_info +SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + 
valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text +WHERE id = 2::bigint; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='topology_idx'; + +UPDATE public.common_setting +SET code_value='spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', + code_group='storageidx' +WHERE code_id='trace_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='event_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='sparse_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='anomaly_idx'; + +UPDATE public.common_setting +SET code_value='alert_event_history', + code_group='storageidx' +WHERE code_id='alert_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='audit_idx'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + 
tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: 
cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + 
volumeMounts: + - name: tmp-dir + mountPath: /tmp1'::text WHERE id = 5::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: 
''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + 
- target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: 
STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - 
pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: 
cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + 
hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + 
action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node 
+ metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: 
CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +ALTER TABLE public.alert_rule_config_info ALTER COLUMN config_data TYPE text; + +update alert_rule_config_info +set config_data = '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"' +where config_id = 'rules'; + +ALTER TABLE public.alert_config_info ALTER COLUMN config_data TYPE text, ALTER COLUMN config_default TYPE text; + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql new file mode 100644 index 
0000000..5c5d3c9 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql @@ -0,0 +1,8 @@ +-- admin의 owner 속성 추가 +UPDATE cloud_user SET is_tenant_owner = true WHERE user_id = 'admin'; + +-- owner에 대한 종속성을 admin으로 이관기능(필요하면 사용) +UPDATE auth_resource3 SET name = replace(name, 'owner', 'admin') WHERE name like '%|owner|%'; + +-- CLOUD-2305 node_memory_used metric_meta node_memory_SReclaimable_bytes 제거 패치문 반영 +UPDATE metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024' WHERE id = 'node_memory_used'; diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql new file mode 100644 index 0000000..02f01db --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql @@ -0,0 +1,361 @@ +-- agent_install_file_info +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when 
total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: 
[__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - 
source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +-- CLOUD-2798 pod_phase_count_by_cluster metric_meta 수정 +UPDATE metric_meta2 SET expr = 'count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))' WHERE id = 'pod_phase_count_by_cluster'; + +-- node_memory_usage 
수정 +update metric_meta2 set expr = 'sum by (xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' where id = 'node_memory_usage'; \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql new file mode 100644 index 0000000..7c582c5 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql @@ -0,0 +1,360 @@ +-- CLOUD-3473 Memory capacity 조회 쿼리 수정 +update metric_meta2 set description = 'imxc_kubernetes_node_resource_capacity_memory', +expr = 'sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})' where id = 'cluster_memory_capacity'; + +-- module명 metricdata owner_name 와 일치하도록 변경 +update common_setting set code_value ='cmoa-collector' where code_id = 'Cloudmoa Collector'; +update common_setting set code_value ='imxc-api' where code_id = 'Api Server'; +update common_setting set code_value ='imxc-ui' where code_id = 'Ui Server'; +update common_setting set code_value ='cloudmoa-trace-agent' where code_id = 'Trace Agent'; + +-- CLOUD-4795 Contaeird 환경 Container Network 수집 불가 건 확인 +-- 22.10.08 현대카드 대응 건으로 release 3.4.6에 반영 +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater 
than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + 
replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - 
source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: 
job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - 
source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: 
xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config'::text WHERE id = 3::bigint; + diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql new file mode 100644 
index 0000000..92344db --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql @@ -0,0 +1,102 @@ +-- CLOUD-4752 node_memory_usage alert 관련 쿼리 수정 +update metric_meta2 set +expr = 'sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' +where id = 'node_memory_usage'; + +-- CLOUD-6474 node-exporter | GPMAXPROCS 세팅 +-- Auto-generated SQL script #202211241543 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - 
--web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' + WHERE id=4; \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql new file mode 100644 index 0000000..ea66c68 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql @@ -0,0 +1,387 @@ +-- CLOUD-6526 host 관련 쿼리 수정 +-- 수집된 메트릭 시간차로 인해 데이터 표출이 안되는걸 방지하기 위해 rate 5m 추가 +UPDATE metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )' +WHERE id='host_network_io_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )' +WHERE id = 'host_disk_read_write_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (instance) ( 
+(rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or +(rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))' +WHERE id = 'host_disk_iops'; + +-- CLOUD-8671 Metric-Agent | 데이터 필터링 설정 추가 +-- Workload > Pod 화면 등에 Docker 런타임 환경의 자원 사용량이 2배 가량으로 보이던 문제 픽스 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + 
kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + 
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: 
$DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=3; + +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=6; diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql new file mode 
100644 index 0000000..99d1dbe --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql @@ -0,0 +1,2844 @@ +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS warning_sign character VARYING(255); +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS critical_sign character VARYING(255); + +CREATE TABLE IF NOT EXISTS public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + ON CONFLICT (id) + DO + UPDATE SET 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + WHERE public.metric_meta2.id = 'node_contextswitch_and_filedescriptor'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_core_by_workload'; + + +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) 
(rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System 
(%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) 
(rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} 
CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_reads_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_limit_bytes_by_workload'; + + +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By 
workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_writes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, 
xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.o + wner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.o + wner_name}} CT:{{$labels.xm_cont_name}} 
PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_max_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_bytes_by_workload'; + + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 
'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_swap_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} 
DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_working_set_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_cache_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_receive_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, 
owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_transmit_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_pod_not_running_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_container_not_running_by_workload'; 
+ + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + WHERE public.metric_meta2.id = 'cotainer_restart_count_by_workload'; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - 
effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: 
cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) + WHERE public.agent_install_file_info.id = 4; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + 
global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') + WHERE public.agent_install_file_info.id = 3; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + 
verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + 
apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + 
apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: 
ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) + WHERE public.agent_install_file_info.id = 2; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... 
should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # NOTE(review): the Kafka remote-write URL handling may be incorrect — verify whether a closing double-quote is required at the end of the URL + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied through the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # `<kubernetes_sd_config>`. 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') + WHERE public.agent_install_file_info.id = 6; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. 
+ ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + 
apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + 
app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. + ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - 
containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + 
containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) + WHERE public.agent_install_file_info.id = 7; + +--Menu Resource +--Infrastructure +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (SELECT id 
FROM auth_resource3 WHERE name='menu|Infrastructure|Topology'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Resource Usage'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Resource Usage'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Namespace'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Namespace'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Nodes'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES 
(6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Node Details'); + +--Workloads +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Deploy List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Cron Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Pods'); + +--Services +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where name='menu|Services|Structure'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Structure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Detail'); + +--Statistics & Analysis +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Performance Trends'); + 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Anomaly Score'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Job History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse 
Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Log Viewer'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Event Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Container Life Cycle'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Service Traces'); + +--Reports +INSERT INTO 
public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Documents'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (62, 'Templates', NULL, 1, 'reportSettings', (select id from auth_resource3 where name='menu|Reports|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Templates'); + +--Dashboards +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Documents'); + +INSERT INTO public.menu_meta (id, 
description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Templates'); + +--Hosts +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (80, 'Hosts', '12.Hosts', 1, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where name='menu|Hosts|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) 
+VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Detail'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Group'); + +--Settings +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (90, 'Settings', '08.Setting', 10, NULL, (select id from auth_resource3 where name='menu|Settings'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|User & Group'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', 
(select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Host Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Metric Meta'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|General'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Notification'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (99, 
'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alias'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|License'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent Installation'); + +--Health Check +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, 
auth_resource3_id, scope_level) +VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check|Check Script'); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql new file mode 100644 index 0000000..60ad862 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql @@ -0,0 +1,4 @@ +alter table cloud_user alter column log_in_count set default 0; +alter table cloud_user alter column user_lock set default false; + +UPDATE public.metric_meta2 SET meta_name = 'Number of Containers Restart', description = 'Number of Containers Restart (10m)', expr = 'increase(imxc_kubernetes_container_restart_count{{filter}}[10m])', resource_type = 'State', entity_type = 'Workload', groupby_keys = null, in_use = true, anomaly_score = false, message = 'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.', created_date = '2021-06-23 09:30:38.646312', modified_date = '2021-06-23 09:30:38.646312' WHERE id = 'cotainer_restart_count_by_workload'; \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql 
b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql new file mode 100644 index 0000000..c8deff4 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql @@ -0,0 +1,1667 @@ +CREATE TABLE public.tenant_info ( + id character varying(255) NOT NULL, + name character varying(255) NOT NULL, + in_used boolean DEFAULT true, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + delete_scheduler_date timestamp without time zone NULL, + contract_id bigint NOT NULL, + tenant_init_clusters character varying(255) NULL +); +ALTER TABLE ONLY public.tenant_info ADD CONSTRAINT tenant_info_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + cluster_id character varying(255) NOT NULL, + description character varying(255), + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + namespace character varying(255) DEFAULT 'default'::character varying +); + +ALTER TABLE public.alert_group OWNER TO admin; + +ALTER TABLE ONLY public.alert_group + ADD CONSTRAINT alert_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX alert_group_name_uindex ON public.alert_group USING btree (name); + +CREATE TABLE public.alert_target ( + id bigint NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + cluster_id character varying(255) NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + alert_group_id bigint, + namespace character varying(255) +); + +ALTER TABLE public.alert_target OWNER TO admin; + +ALTER TABLE ONLY public.alert_target + ADD CONSTRAINT alert_target_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.alert_target + ADD CONSTRAINT fkjrvj775641ky7s0f82kx3sile FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + 
+ + +CREATE TABLE public.report_template ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + enable boolean NOT NULL, + metric_data text, + template_data text, + title character varying(255) +); + +ALTER TABLE public.report_template OWNER TO admin; + +ALTER TABLE ONLY public.report_template + ADD CONSTRAINT report_template_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_event ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + alert_name character varying(255) NOT NULL, + cluster_id character varying(255) NOT NULL, + data text NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + level character varying(255) NOT NULL, + meta_id character varying(255) NOT NULL, + namespace character varying(255), + starts_at bigint NOT NULL, + threshold character varying(255) NOT NULL, + value character varying(255) NOT NULL, + message character varying(255), + ends_at bigint, + status character varying(20) NOT NULL, + hook_collect_at bigint +); + +ALTER TABLE public.alert_event OWNER TO admin; + +CREATE TABLE public.metric_meta2 ( + id character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + description character varying(255) NOT NULL, + expr text NOT NULL, + resource_type character varying(255), + entity_type character varying(255) NOT NULL, + groupby_keys character varying(255), + in_use boolean DEFAULT false NOT NULL, + anomaly_score boolean DEFAULT false NOT NULL, + message character varying(255) NOT NULL, + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without time zone DEFAULT now() NOT NULL +); + +ALTER TABLE public.metric_meta2 OWNER to admin; + +ALTER TABLE ONLY public.metric_meta2 + 
ADD CONSTRAINT metric_meta2_pk PRIMARY KEY (id); + +CREATE TABLE public.alert_rule ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + critical float, + name character varying(255), + warning float, + alert_group_id bigint, + alert_rule_meta_id character varying(255) NOT NULL, + alert_target_id bigint, + duration character varying(255) NOT NULL, + pause boolean DEFAULT false NOT NULL, + warning_sign character varying(255), + critical_sign character varying(255) +); + +ALTER TABLE public.alert_rule OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT alert_rule_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk6b09d1xfyago6wiiqhdiv03s3 FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fk8wkucwkgr48hkfg8cvuptww0f FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + +ALTER TABLE ONLY public.alert_rule + ADD CONSTRAINT fkiqaskea7ts0f872u3nx9ne25u FOREIGN KEY (alert_target_id) REFERENCES public.alert_target(id); + +CREATE TABLE public.alert_rule_meta ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + description text NOT NULL, + expr character varying(255) NOT NULL, + meta_name character varying(255) NOT NULL, + target character varying(255) NOT NULL, + message character varying(255) +); + +ALTER TABLE public.alert_rule_meta OWNER TO admin; + +ALTER TABLE ONLY public.alert_rule_meta + ADD CONSTRAINT alert_rule_meta_pkey PRIMARY KEY (id); + +CREATE SEQUENCE hibernate_sequence; + +CREATE TABLE public.cloud_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + name character varying(255) NOT NULL, + description character varying(255), + created_by character varying(255), + auth_resource_id bigint 
+); + +ALTER TABLE public.cloud_group OWNER TO admin; + +ALTER TABLE ONLY public.cloud_group + ADD CONSTRAINT cloud_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX cloud_group_name_uindex ON public.cloud_group USING btree (name); + +CREATE TABLE public.cloud_user ( + user_id character varying(255) NOT NULL, + email character varying(255), + is_admin boolean NOT NULL, + phone character varying(255), + user_nm character varying(255) NOT NULL, + user_pw character varying(255) NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + dormancy_date timestamp without time zone NULL, + company character varying(255), + department character varying(255), + last_log_in_date timestamp without time zone, + "position" character varying(255), + use_ldap boolean NOT NULL, + auth_method character varying(255) NOT NULL, + log_in_count integer default 0 NOT NULL, + user_lock boolean default false NOT NULL, + user_lock_date timestamp without time zone, + tenant_id character varying(120), + is_tenant_owner boolean default false, + auth_resource_id bigint, + status character varying(255) default 'use' NOT NULL +); + +ALTER TABLE public.cloud_user OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user ADD CONSTRAINT cloud_user_pkey PRIMARY KEY (user_id); + +ALTER TABLE ONLY public.cloud_user + ADD CONSTRAINT cloud_user_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.menu_meta ( + id bigint NOT NULL, + description character varying(255), + icon character varying(255), + "position" integer NOT NULL, + url character varying(255), + auth_resource3_id bigint NOT NULL, + scope_level int default 0 +); + +ALTER TABLE public.menu_meta OWNER TO admin; + +ALTER TABLE ONLY public.menu_meta + ADD CONSTRAINT menu_meta_pkey PRIMARY KEY (id); + + + +CREATE TABLE public.metric_base ( + meta_name character varying(255) NOT NULL, + provider character varying(255) NOT NULL, + description 
character varying(255) NOT NULL, + resource_type character varying(255), + diag_type character varying(255), + entity_type character varying(255) NOT NULL, + metric_type character varying(255) NOT NULL, + keys character varying(255), + created_date timestamp without time zone DEFAULT now() NOT NULL, + modified_date timestamp without time zone DEFAULT now() NOT NULL +); + + +ALTER TABLE public.metric_base OWNER TO admin; + +ALTER TABLE ONLY public.metric_base + ADD CONSTRAINT metric_base_pk PRIMARY KEY (meta_name); + +CREATE TABLE public.report_static ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone NOT NULL, + modified_by character varying(255), + modified_date timestamp without time zone NOT NULL, + cron_exp character varying(255), + metric_data text, + template_data text, + title character varying(255), + type character varying(255), + report_template_id bigint +); + +ALTER TABLE public.report_static OWNER TO admin; + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT report_static_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.report_static + ADD CONSTRAINT fk7o821ym9a57lrcfipf928cfpe FOREIGN KEY (report_template_id) REFERENCES public.report_template(id); + +CREATE TABLE public.user_group ( + user_group_id bigint NOT NULL, + user_id character varying(255) NOT NULL +); + +ALTER TABLE public.user_group OWNER TO admin; + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT user_group_pkey PRIMARY KEY (user_group_id, user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkooy6rip2craw6jy3geb5wnix6 FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + +ALTER TABLE ONLY public.user_group + ADD CONSTRAINT fkowo8h9te5nwashab3u30docg FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +CREATE TABLE public.cloud_user_profile ( + user_id character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + profile_image oid 
+); + +ALTER TABLE public.cloud_user_profile OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_profile + ADD CONSTRAINT cloud_user_profile_pkey PRIMARY KEY (user_id); + + +CREATE TABLE public.common_setting ( + code_id character varying(255) NOT NULL, + code_value character varying(255), + code_desc character varying(255), + code_auth character varying(255), + code_group character varying(255), + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.common_setting OWNER TO admin; + +ALTER TABLE ONLY public.common_setting + ADD CONSTRAINT common_setting_pkey PRIMARY KEY (code_id); + + + +CREATE TABLE public.dashboard_thumbnail ( + id bigint NOT NULL, + thumbnail_image oid, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + + +ALTER TABLE public.dashboard_thumbnail OWNER TO admin; + +ALTER TABLE ONLY public.dashboard_thumbnail + ADD CONSTRAINT dashboard_thumbnail_pkey PRIMARY KEY (id); + + + +CREATE TABLE public.notification_channel ( + id bigint NOT NULL, + created_by character varying(255), + created_date timestamp without time zone, + modified_by character varying(255), + modified_date timestamp without time zone, + cluster_id character varying(255), + config text, + name character varying(255), + type character varying(255) +); + +ALTER TABLE public.notification_channel OWNER TO admin; + +ALTER TABLE ONLY public.notification_channel + ADD CONSTRAINT notification_channel_pkey PRIMARY KEY (id); + + +CREATE TABLE public.notification_registry ( + id bigint NOT NULL, + alert_rule_id bigint NOT NULL, + notification_channel_id bigint +); + +ALTER TABLE public.notification_registry OWNER TO admin; + +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT notification_registry_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.notification_registry + ADD CONSTRAINT fk28xo8snm6fd19i3uap0oba0d1 FOREIGN KEY (notification_channel_id) REFERENCES 
public.notification_channel(id); + + +CREATE TABLE public.license_check_2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_id integer NOT NULL, + real_host_id integer NOT NULL, + imxc_cpu_count integer NOT NULL, + real_cpu_count integer NOT NULL, + target_clusters_count integer NOT NULL, + real_clusters_count integer NOT NULL, + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + features_bitmap integer NOT NULL, + allowable_range integer NOT NULL, + check_time timestamp without time zone NOT NULL, + check_result integer NOT NULL +); + +ALTER TABLE public.license_check_2 + ADD CONSTRAINT license_check_pkey PRIMARY KEY (id); + +CREATE INDEX license_check_check_time_idx ON license_check_2(check_time); + + +CREATE TABLE public.license_violation ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone +); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check_2(id); + +ALTER TABLE public.license_violation + ADD CONSTRAINT license_violation_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check_2(id); + +CREATE INDEX license_violation_check_time_idx ON license_violation(check_time); +CREATE INDEX license_violation_resolved_time_idx ON license_violation(resolved_time); + + +CREATE TABLE public.license_key ( + id bigint NOT 
NULL, + license_key text NOT NULL, + set_time timestamp NOT NULL, + in_used bool NULL, + tenant_id varchar NULL, + cluster_id bigint NULL, + CONSTRAINT license_key_pkey PRIMARY KEY (id) +); + +ALTER TABLE public.license_key ADD CONSTRAINT license_key_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id); + +CREATE TABLE public.license_check2 ( + id bigint NOT NULL, + site_name character varying(255) NOT NULL, + license_type integer NOT NULL, + expire_date character varying(255) NOT NULL, + imxc_host_ids character varying(255), + real_host_ids character varying(255), + target_nodes_count integer NOT NULL, + real_nodes_count integer NOT NULL, + target_pods_count integer NOT NULL, + real_pods_count integer NOT NULL, + target_svcs_count integer NOT NULL, + real_svcs_count integer NOT NULL, + target_core_count integer NOT NULL, + real_core_count integer NOT NULL, + allowable_range integer NOT NULL, + license_cluster_id character varying(255), + check_time timestamp without time zone NOT NULL, + check_result integer NOT null +); + +ALTER TABLE public.license_check2 + ADD CONSTRAINT license_check2_pkey PRIMARY KEY (id); + +CREATE INDEX license_check2_time_idx ON license_check2(check_time); + +CREATE TABLE public.license_violation2 ( + id bigint not null, + check_id bigint not null, + check_time timestamp without time zone not null, + violation_item varchar not null, + allow_time timestamp without time zone not null, + resolved_id bigint, + resolved_time timestamp without time zone, + cluster_id varchar not null +); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_pkey PRIMARY KEY (id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check2(id); + +ALTER TABLE public.license_violation2 + ADD CONSTRAINT license_violation2_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check2(id); + +CREATE INDEX 
license_violation2_check_time_idx ON license_violation2(check_time); +CREATE INDEX license_violation2_resolved_time_idx ON license_violation2(resolved_time); + +CREATE TABLE public.license_key2 ( + id bigint not null, + license_key text not null, + set_time timestamp without time zone not null, + cluster_id varchar, + license_used bool not null +); + +ALTER TABLE public.license_key2 + ADD CONSTRAINT license_key2_pkey PRIMARY KEY (id); + +create table public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +CREATE TABLE public.auth_resource2 ( + id bigint NOT NULL default nextval('hibernate_sequence'), + access_type integer NOT NULL, + name character varying(255) NOT NULL, + parent_id bigint, + type character varying(255) NOT NULL +); + +ALTER TABLE public.auth_resource2 OWNER TO admin; + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT auth_resource2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.auth_resource2 + ADD CONSTRAINT resource_name_uniq UNIQUE (name, type, parent_id); + +--ALTER TABLE ONLY public.auth_resource2 +-- ADD CONSTRAINT auth_resource2_auth_resource_id_fk FOREIGN KEY (parent_id) REFERENCES public.auth_resource2(id); +-- +--ALTER 
TABLE ONLY public.menu_meta +-- ADD CONSTRAINT fk2tqq4ybf6w130fsaejhrsnw5s FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.user_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_id character varying(255) +); + +ALTER TABLE public.user_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_pkey PRIMARY KEY (id); + +-- ALTER TABLE ONLY public.user_permission2 +-- ADD CONSTRAINT user_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +ALTER TABLE ONLY public.user_permission2 + ADD CONSTRAINT user_permission2_user_id_fk FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id); + + +CREATE TABLE public.group_permission2 ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + all_child boolean NOT NULL, + permission integer NOT NULL, + auth_resource_id bigint, + user_group_id bigint +); + +ALTER TABLE public.group_permission2 OWNER TO admin; + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.group_permission2 + ADD CONSTRAINT group_permission2_user_group_id_fk FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id); + +-- ALTER TABLE ONLY public.group_permission2 +-- ADD CONSTRAINT group_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id); + +CREATE TABLE public.resource_group2 ( + id int8 NOT NULL, + created_date timestamp NOT NULL, + modified_date timestamp NOT NULL, + "name" varchar(255) NOT NULL, + description varchar(255) NULL, + CONSTRAINT resource_group2_pkey PRIMARY KEY (id) +-- CONSTRAINT resource_group2_fk1 FOREIGN KEY (id) 
REFERENCES auth_resource2(id)
);

ALTER TABLE public.resource_group2 OWNER TO "admin";
GRANT ALL ON TABLE public.resource_group2 TO "admin";

-- Membership join table: auth_resource2 rows belonging to a resource_group2.
CREATE TABLE public.resource_member2 (
    resource_group_id int8 NOT NULL,
    auth_resource_id int8 NOT NULL,
    CONSTRAINT resource_member2_pkey PRIMARY KEY (resource_group_id, auth_resource_id),
    CONSTRAINT resource_member2_fkey1 FOREIGN KEY (resource_group_id) REFERENCES resource_group2(id)
    -- CONSTRAINT resource_member2_fkey2 FOREIGN KEY (auth_resource_id) REFERENCES auth_resource2(id)
);

ALTER TABLE public.resource_member2 OWNER TO "admin";
GRANT ALL ON TABLE public.resource_member2 TO "admin";

-- Dashboard definitions (layout stored as text).
CREATE TABLE public.dashboard2 (
    id bigint NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL,
    layout text NOT NULL,
    title character varying(255) NOT NULL,
    auth_resource_id bigint NOT NULL,
    created_by character varying(255) NOT NULL,
    modified_by character varying(255) NOT NULL,
    description character varying(255),
    share boolean DEFAULT false
);

ALTER TABLE public.dashboard2 OWNER TO admin;

ALTER TABLE ONLY public.dashboard2
    ADD CONSTRAINT dashboard2_pkey PRIMARY KEY (id);

-- FK to auth_resource2 deliberately disabled; dashboard2.auth_resource_id is
-- wired to auth_resource3 further down in this script.
-- ALTER TABLE ONLY public.dashboard2
--     ADD CONSTRAINT dashboard_resource_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id);

-- Per-node log rotation / backup settings.
CREATE TABLE public.log_management (
    cluster_id varchar NOT NULL,
    node_id varchar NOT NULL,
    log_rotate_dir varchar,
    log_rotate_count integer,
    log_rotate_size integer,
    log_rotate_management boolean NOT NULL,
    back_up_dir varchar,
    back_up_period integer,
    back_up_dir_size integer,
    back_up_management boolean NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone
);

ALTER TABLE public.log_management ADD CONSTRAINT log_management_pkey PRIMARY KEY (cluster_id, node_id);

-- Per-service trace sampling configuration.
CREATE TABLE public.sampling_setting (
    service_id bigint NOT NULL,
    service_name character varying(255),
    sampling_type character varying(255),
    sampling_param character varying(255),
    cluster varchar,
    namespace varchar,
    cluster_id bigint
);
ALTER TABLE public.sampling_setting OWNER TO admin;

ALTER TABLE ONLY public.sampling_setting
    ADD CONSTRAINT sampling_setting_pkey PRIMARY KEY (service_id);

-- Per-operation sampling overrides; child of sampling_setting.
CREATE TABLE public.operation_setting (
    id bigint NOT NULL,
    service_id bigint NOT NULL,
    sampling_type character varying(255),
    sampling_param character varying(255),
    operation_name character varying(255)
);

ALTER TABLE public.operation_setting OWNER TO admin;

ALTER TABLE ONLY public.operation_setting
    ADD CONSTRAINT operation_setting_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.operation_setting
    ADD CONSTRAINT operation_setting_fkey FOREIGN KEY (service_id) REFERENCES public.sampling_setting(service_id);

CREATE TABLE public.cluster_setting (
    cluster_id bigint NOT NULL,
    param_type character varying(255),
    param_value character varying(255),
    cluster_name varchar,
    name character varying(255)
);

ALTER TABLE ONLY public.cluster_setting
    ADD CONSTRAINT cluster_setting_pkey PRIMARY KEY (cluster_id);

-- Per-user alias codes.
CREATE TABLE public.alias_code (
    user_id varchar NOT NULL,
    id varchar NOT NULL,
    name varchar,
    type varchar,
    use_yn varchar,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone
);

ALTER TABLE ONLY public.alias_code ADD CONSTRAINT alias_code_pkey PRIMARY KEY (user_id, id);

CREATE TABLE public.sparse_log_info (
    id varchar NOT NULL,
    cluster_id varchar,
    namespace varchar,
    target_type varchar,
    target_id varchar,
    log_path varchar,
    created_date timestamp,
    modified_date timestamp,
    threshold float4,
    PRIMARY KEY ("id")
);

-- Saved view state (JSON) per user and view.
CREATE TABLE public.view_code (
    user_id varchar NOT NULL,
    view_id varchar NOT NULL,
    json_data text,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone
);

ALTER TABLE ONLY public.view_code ADD CONSTRAINT view_code_pkey PRIMARY KEY (user_id, view_id);

CREATE TABLE public.entity_black_list (
    entity_type varchar NOT NULL,
    entity_name varchar NOT NULL,
    cluster_id varchar NOT NULL,
    namespace varchar,
    black_list bool NOT NULL,
    workload varchar(255) NOT NULL
);

-- NOTE: adding the PK implicitly forces NOT NULL on the nullable namespace column.
ALTER TABLE public.entity_black_list
    ADD CONSTRAINT entity_black_list_pkey PRIMARY KEY (entity_type, entity_name, cluster_id, namespace);

-- Scheduled script execution settings (cron_exp holds the schedule expression).
CREATE TABLE public.script_setting (
    id bigint NOT NULL,
    name character varying(255),
    agent_list character varying(255),
    file_path character varying(255),
    args character varying(255),
    valid_cmd character varying(255),
    valid_val character varying(255),
    cron_exp character varying(255),
    create_user character varying(255),
    mtime BIGINT,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone
);

ALTER TABLE ONLY public.script_setting
    ADD CONSTRAINT script_setting_pkey PRIMARY KEY (id);

CREATE TABLE public.agent_install_file_info (
    id bigint NOT NULL,
    name character varying(255) NOT NULL,
    type character varying(255) NOT NULL,
    description text,
    version character varying(255),
    yaml text,
    use_yn boolean NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL
);

ALTER TABLE ONLY public.agent_install_file_info ADD CONSTRAINT agent_install_file_info_pkey PRIMARY KEY (id);

-- Third-generation auth resource table; ids come from hibernate_sequence.
CREATE TABLE auth_resource3(
    id bigint NOT NULL DEFAULT nextval('hibernate_sequence'),
    name character varying(255) NOT NULL,
    is_deleted boolean NOT NULL,
    tenant_id character varying(255)
);

ALTER TABLE public.auth_resource3 OWNER TO admin;

ALTER TABLE ONLY public.auth_resource3
    ADD CONSTRAINT auth_resource3_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.auth_resource3
    ADD CONSTRAINT auth_resource3_name_uniq UNIQUE (name);

CREATE TABLE resource_member3(
resource_group_id bigint NOT NULL,
    auth_resource3_id bigint NOT NULL
);

ALTER TABLE resource_member3 OWNER TO admin;

ALTER TABLE ONLY public.resource_member3
    ADD CONSTRAINT resource_member3_pkey PRIMARY KEY (resource_group_id, auth_resource3_id);

-- Wire auth_resource3 into every table that holds an auth resource reference.
ALTER TABLE ONLY public.auth_resource3 ADD CONSTRAINT auth_resource3_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id);

ALTER TABLE public.menu_meta ADD CONSTRAINT menu_meta_auth_resource3_fk FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id);
ALTER TABLE public.user_permission2 ADD CONSTRAINT user_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);
ALTER TABLE public.resource_group2 ADD CONSTRAINT resource_group2_auth_resource3_fk1 FOREIGN KEY (id) REFERENCES auth_resource3(id);
ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey1 FOREIGN KEY (resource_group_id) REFERENCES public.resource_group2(id);
ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey2 FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id);
ALTER TABLE public.group_permission2 ADD CONSTRAINT group_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);
ALTER TABLE public.dashboard2 ADD CONSTRAINT dashboard2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);
ALTER TABLE public.cloud_user ADD CONSTRAINT cloud_user_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);
ALTER TABLE public.cloud_group ADD CONSTRAINT cloud_group_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);

-- NOTE(review): unquoted identifiers fold to lower case, so this creates a
-- database named "configs", not "CONFIGS". Quote the name if upper case is intended.
CREATE DATABASE CONFIGS;
CREATE DATABASE keycloak;

-- JSPD option definitions (option id, default value, input metadata).
CREATE TABLE public.jspd_prop (
    code_id character varying(255) NOT NULL,
    default_value character varying(255) NOT NULL,
    description text,
    code_type character varying(255),
    input_type character varying(255),
    input_props character varying(255),
    use_yn boolean NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL
);

ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id);

-- JSPD option values set per cluster / namespace / service.
CREATE TABLE public.jspd_config (
    cluster_id character varying(255) NOT NULL,
    namespace character varying(255) NOT NULL,
    service character varying(255) NOT NULL,
    code_id character varying(255),
    code_value character varying(255),
    code_type character varying(255),
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL
);
-- Historical migration statements kept for reference (columns now in the CREATE):
-- ALTER TABLE public.jspd_prop ADD input_type character varying(255);
-- ALTER TABLE public.jspd_prop ADD input_props character varying(255);

-- NOTE: the PK implicitly forces NOT NULL on the nullable code_id column.
ALTER TABLE public.jspd_config
    ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id);

ALTER TABLE ONLY public.jspd_config
    ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id);

-- Notification (noti) server tables.
CREATE TABLE public.alert_group_v2 (
    id bigint NOT NULL,
    created_date timestamp NOT NULL,
    modified_date timestamp NOT NULL,
    cluster_id varchar(255) NOT NULL,
    description varchar(255),
    name varchar(255) NOT NULL,
    type varchar(255) NOT NULL,
    namespace varchar(255) DEFAULT 'default'::character varying,
    destination varchar(255) NOT NULL,
    created_by varchar(255) NOT NULL
);

CREATE TABLE public.alert_target_v2 (
    id bigint NOT NULL,
    created_date timestamp,
    modified_date timestamp,
    cluster_id varchar(255) NOT NULL,
    entity_id varchar(255) NOT NULL,
    entity_type varchar(255) NOT NULL,
    alert_group_id bigint,
    namespace varchar(255)
);

CREATE TABLE public.alert_rule_v2 (
    id bigint NOT NULL,
    created_date timestamp NOT NULL,
    modified_date timestamp NOT NULL,
    critical double precision,
    name varchar(255),
    warning double precision,
    alert_group_id bigint,
    alert_rule_meta_id varchar(255) NOT NULL,
    alert_target_id bigint,
    duration varchar(255) NOT NULL,
    pause boolean DEFAULT false NOT NULL,
    critical_sign varchar(255),
    warning_sign varchar(255),
    destination varchar(255),
    created_by varchar(255)
);

ALTER TABLE public.alert_group_v2 ADD CONSTRAINT alert_group_v2_id_pk PRIMARY KEY (id);
ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_id_pk PRIMARY KEY (id);
ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_id_pk PRIMARY KEY (id);

ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id);
ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id);
ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_rule_meta_id_fk FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id);
ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_target_id_fk FOREIGN KEY (alert_target_id) REFERENCES public.alert_target_v2(id);
ALTER TABLE ONLY public.notification_registry
    ADD CONSTRAINT fk4lljw4fnija73tm3lthjg90rx FOREIGN KEY (alert_rule_id) REFERENCES public.alert_rule_v2(id);

-- Cortex alert configuration tables.
CREATE TABLE public.alert_rule_config_info (
    config_id varchar NOT NULL,
    config_data text NOT NULL,
    in_use boolean DEFAULT true NOT NULL,
    created_date timestamp,
    modified_date timestamp
);

CREATE TABLE alert_config_info (
    config_id varchar NOT NULL,
    config_data text NOT NULL,
    config_default text NOT NULL,
    in_use boolean DEFAULT true NOT NULL,
    created_date timestamp,
    modified_date timestamp
);

-- Alertmanager-style routing configuration, one row per config id.
CREATE TABLE alert_config (
    id varchar NOT NULL,
    cluster_id varchar,
    resolve_timeout varchar,
    receiver varchar,
    group_by varchar,
    group_wait varchar,
    group_interval varchar,
    repeat_interval varchar,
routes_level varchar,
    routes_continue varchar,
    receiver_name varchar,
    webhook_url varchar,
    send_resolved varchar,
    inner_route boolean,
    inner_webhook boolean,
    in_use boolean DEFAULT true NOT NULL,
    created_date timestamp,
    modified_date timestamp
);

ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id);
ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id);
ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id);

-- Per-user UI preferences.
CREATE TABLE public.cloud_user_setting (
    user_id character varying(255) NOT NULL,
    lang character varying(20) DEFAULT 'en',
    theme character varying(20) DEFAULT 'dark',
    access_token integer DEFAULT 30,
    refresh_token integer DEFAULT 10080,
    error_msg boolean DEFAULT false,
    alert_sound boolean DEFAULT false,
    session_persistence boolean DEFAULT true,
    gpu_acc_topology boolean DEFAULT true,
    created_date timestamp without time zone,
    modified_date timestamp without time zone
);

ALTER TABLE public.cloud_user_setting OWNER TO admin;

ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id);

-- =====================================================================
-- 2022-05-31 KubeInfo flattening tables (cmoa_*): one table per flattened
-- Kubernetes object section; most field values are stored as text.
-- NOTE: camelCase column names below are unquoted, so PostgreSQL folds
-- them to lower case (metadata_resourceVersion == metadata_resourceversion).
-- =====================================================================
CREATE TABLE cmoa_configmap_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    kind_status varchar(50),
    metadata_resourceVersion text,
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_namespace text,
    binaryData text,
    data text,
    immutable text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_cronjob_active(
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    status_active_apiVersion text,
    status_active_fieldPath text,
    status_active_kind text,
    status_active_name text,
    status_active_namespace text,
    status_active_resourceVersion text,
    status_active_uid text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_cronjob_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceVersion text,
    spec_failedJobsHistoryLimit text,
    spec_schedule text,
    spec_successfulJobsHistoryLimit text,
    spec_suspend text,
    status_lastScheduleTime text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_daemonset_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceVersion text,
    status_currentNumberScheduled text,
    status_desiredNumberScheduled text,
    status_numberAvailable text,
    status_numberMisscheduled text,
    status_numberReady text,
    status_numberUnavailable text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_deployment_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceVersion text,
    spec_replicas text,
    spec_template_spec_containers_image text,
    status_availableReplicas text,
    status_readyReplicas text,
    status_replicas text,
    status_unavailableReplicas text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_endpoint_addresses(
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    subset_addresses_ip text,
    subset_addresses_hostname text,
    subset_addresses_nodeName text,
    subset_addresses_targetRef text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_endpoint_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_name text,
    metadata_resourceVersion text,
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_namespace text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_endpoint_notreadyaddresses(
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    subset_notreadyaddresses_ip text,
    subset_notreadyaddresses_hostname text,
    subset_notreadyaddresses_nodename text,
    subset_notreadyaddresses_targetref text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_endpoint_ports(
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    subset_ports_port text,
    subset_ports_appprotocol text,
    subset_ports_name text,
    subset_ports_protocol text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_event_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    action text,
    count text,
    eventtime text,
    firsttimestamp text,
    involvedobject_apiversion text,
    involvedobject_fieldpath text,
    involvedobject_kind text,
    involvedobject_name text,
    involvedobject_namespace text,
    involvedobject_resourceversion text,
    involvedobject_uid text,
    lasttimestamp text,
    message text,
    metadata_annotations text,
    metadata_creationtimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceversion text,
    reason text,
    related_apiversion text,
    related_fieldpath text,
    related_kind text,
    related_name text,
    related_namespace text,
    related_resourceversion text,
    related_uid text,
    series_count text,
    series_lastobservedtime text,
    series_state text,
    source_component text,
    source_host text,
    type text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_job_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationtimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_ownerreferences text,
    metadata_ownerReferences_kind varchar(30),
    metadata_ownerReferences_uid varchar(40),
    metadata_resourceversion text,
    spec_backofflimit text,
    spec_completions text,
    spec_parallelism text,
status_active text,
    status_completiontime text,
    status_failed text,
    status_starttime text,
    status_succeeded text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_job_template (
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    spec_template_spec_containers_args text,
    spec_template_spec_containers_command text,
    spec_template_spec_containers_image text,
    spec_template_spec_containers_name text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_namespace_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_name text,
    metadata_resourceversion text,
    metadata_annotations text,
    metadata_creationtimestamp varchar(25),
    metadata_labels text,
    metadata_namespace text,
    spec_finalizers text,
    status_phase text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_node_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_name text,
    metadata_selflink text,
    metadata_resourceversion text,
    metadata_creationtimestamp varchar(25),
    metadata_labels text,
    metadata_annotations text,
    spec_podcidr text,
    spec_taints text,
    status_capacity_cpu text,
    status_capacity_ephemeral_storage text,
    status_capacity_hugepages_1gi text,
    status_capacity_hugepages_2mi text,
    status_capacity_memory text,
    status_capacity_pods text,
    status_allocatable_cpu text,
    status_allocatable_ephemeral_storage text,
    status_allocatable_hugepages_1gi text,
    status_allocatable_hugepages_2mi text,
    status_allocatable_memory text,
    status_allocatable_pods text,
    status_addresses text,
    status_daemonendpoints_kubeletendpoint_port text,
    status_nodeinfo_machineid text,
    status_nodeinfo_systemuuid text,
    status_nodeinfo_bootid text,
    status_nodeinfo_kernelversion text,
    status_nodeinfo_osimage text,
    status_nodeinfo_containerruntimeversion text,
    status_nodeinfo_kubeletversion text,
    status_nodeinfo_kubeproxyversion text,
    status_nodeinfo_operatingsystem text,
    status_nodeinfo_architecture text,
    status_volumesinuse text,
    status_volumesattached text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_node_condition (
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    status_conditions_type text,
    status_conditions_status text,
    status_conditions_lastheartbeattime text,
    status_conditions_lasttransitiontime text,
    status_conditions_reason text,
    status_conditions_message text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_node_image (
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    status_images_names text,
    status_images_sizebytes text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_persistentvolume_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationtimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceversion text,
    spec_accessmodes text,
    spec_awselasticblockstore text,
    spec_azuredisk text,
    spec_azurefile text,
    spec_capacity text,
    spec_claimref_apiversion text,
    spec_claimref_fieldpath text,
    spec_claimref_kind text,
    spec_claimref_name text,
    spec_claimref_namespace text,
    spec_claimref_resourceversion text,
    spec_claimref_uid text,
    spec_csi text,
    spec_fc text,
    spec_flexvolume text,
    spec_flocker text,
    spec_gcepersistentdisk text,
    spec_glusterfs text,
    spec_hostpath text,
    spec_iscsi text,
    spec_local text,
    spec_nfs text,
    spec_persistentvolumereclaimpolicy text,
    spec_photonpersistentdisk text,
    spec_portworxvolume text,
    spec_quobyte text,
    spec_rbd text,
    spec_scaleio text,
    spec_storageclassname text,
    spec_storageos text,
    spec_volumemode text,
    spec_vspherevolume text,
    status_message text,
    status_phase text,
    status_reason text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_persistentvolumeclaim_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationtimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceversion text,
    spec_accessmodes text,
    spec_storageclassname text,
    spec_volumemode text,
    spec_volumename text,
    status_accessmodes text,
    status_capacity text,
    status_phase text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_pod_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    kind_status varchar(50),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    metadata_selflink text,
    metadata_resourceversion text,
    metadata_creationtimestamp varchar(25),
    metadata_generatename text,
    metadata_namespace text,
    metadata_deletiontimestamp text,
    metadata_deletiongraceperiodseconds text,
    metadata_labels text,
    metadata_ownerreferences text,
    metadata_ownerReferences_kind varchar(30),
    metadata_ownerReferences_uid varchar(40),
    metadata_annotations text,
    spec_hostnetwork text,
    spec_priorityclassname text,
    spec_enableservicelinks text,
    spec_priority text,
    spec_schedulername text,
    spec_hostpid text,
    spec_nodename text,
    spec_serviceaccount text,
    spec_serviceaccountname text,
    spec_dnspolicy text,
    spec_terminationgraceperiodseconds text,
    spec_restartpolicy text,
    spec_securitycontext text,
    spec_nodeselector_kubernetes_io_hostname text,
    spec_tolerations text,
    status_phase text,
    status_hostip text,
    status_podip text,
    status_starttime text,
    status_qosclass text,
    status_reason text,
    status_message text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_pod_conditions (
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    status_conditions_type text,
    status_conditions_status text,
    status_conditions_lasttransitiontime text,
    status_conditions_reason text,
    status_conditions_message text,
    status_conditions_lastprobetime text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_pod_containerstatuses (
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    status_containerstatuses_name text,
    status_containerstatuses_ready text,
    status_containerstatuses_restartcount text,
    status_containerstatuses_image text,
    status_containerstatuses_imageid text,
status_containerstatuses_containerid text,
    status_containerstatuses_state_terminated_exitcode text,
    status_containerstatuses_state_terminated_reason text,
    status_containerstatuses_state_terminated_startedat text,
    status_containerstatuses_state_terminated_finishedat text,
    status_containerstatuses_state_terminated_containerid text,
    status_containerstatuses_state_waiting_reason text,
    status_containerstatuses_state_waiting_message text,
    status_containerstatuses_state_running_startedat text,
    status_containerstatuses_laststate_terminated_exitcode text,
    status_containerstatuses_laststate_terminated_reason text,
    status_containerstatuses_laststate_terminated_startedat text,
    status_containerstatuses_laststate_terminated_finishedat text,
    status_containerstatuses_laststate_terminated_containerid text,
    status_containerstatuses_laststate_waiting_reason text,
    status_containerstatuses_laststate_waiting_message text,
    status_containerstatuses_laststate_running_startedat text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_pod_containers (
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    spec_containers_name text,
    spec_containers_image text,
    spec_containers_env text,
    spec_containers_resources_limits_cpu text,
    spec_containers_resources_limits_memory text,
    spec_containers_resources_requests_cpu text,
    spec_containers_resources_requests_memory text,
    spec_containers_volumemounts text,
    spec_containers_securitycontext_privileged text,
    spec_containers_command text,
    spec_containers_ports text,
    spec_containers_args text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_pod_volume (
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    spec_volumes_name text,
    spec_volumes_hostpath text,
    spec_volumes_secret text,
    spec_volumes_configmap text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_replicaset_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationtimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceversion text,
    spec_replicas text,
    status_availablereplicas text,
    status_readyreplicas text,
    status_replicas text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_resourcequota_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationtimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceversion text,
    spec_hard text,
    spec_scopes text,
    status_hard text,
    status_used text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_resourcequota_scopeselector (
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    spec_scopeselector_matchexpressions_operator text,
    spec_scopeselector_matchexpressions_scopename text,
    spec_scopeselector_matchexpressions_values text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_service_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_name text,
    metadata_resourceversion text,
    metadata_ownerreferences text,
    metadata_ownerReferences_kind varchar(30),
    metadata_ownerReferences_uid varchar(40),
    metadata_annotations text,
    metadata_creationtimestamp varchar(25),
    metadata_deletiongraceperiodseconds text,
    metadata_deletiontimestamp text,
    metadata_labels text,
    metadata_namespace text,
    spec_clusterip text,
    spec_externalips text,
    spec_selector text,
    spec_type text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_service_ports (
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    spec_ports_appprotocol text,
    spec_ports_name text,
    spec_ports_nodeport text,
    spec_ports_port text,
    spec_ports_protocol text,
    spec_ports_targetport text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);

CREATE TABLE cmoa_statefulset_base (
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationtimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceversion text,
    spec_replicas text,
    status_readyreplicas text,
    status_replicas text,
    create_time timestamp DEFAULT now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);

-- API error audit log.
CREATE TABLE public.api_error_history (
    id int8 NOT NULL,
    api_msg varchar(255) NULL,
    code varchar(255) NULL,
    "exception" varchar(255) NULL,
    http_error varchar(255) NULL,
    http_status
int4 NULL, + occureence_time varchar(255) NULL, + params varchar(255) NULL, + "path" varchar(255) NULL, + "type" varchar(255) NULL, + CONSTRAINT api_error_history_pkey PRIMARY KEY (id) +); + +CREATE TABLE public.metric_score ( + clst_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + metric_id varchar(255) NOT NULL, + sub_key varchar(255) NOT NULL, + unixtime int4 NOT NULL, + anomaly bool NOT NULL, + cont_name varchar(255) NULL, + "instance" varchar(255) NULL, + "namespace" varchar(255) NULL, + node_id varchar(255) NULL, + pod_id varchar(255) NULL, + score int4 NOT NULL, + yhat_lower_upper json NULL, + CONSTRAINT metric_score_pkey PRIMARY KEY (clst_id, entity_id, entity_type, metric_id, sub_key, unixtime) +); + + +CREATE TABLE public.tenant_info_auth_resources ( + tenant_info_id varchar(255) NOT NULL, + auth_resources_id int8 NOT NULL, + CONSTRAINT tenant_info_auth_resources_pkey PRIMARY KEY (tenant_info_id, auth_resources_id), + CONSTRAINT uk_7s6l8e2c8gli4js43c4xoifcl UNIQUE (auth_resources_id) +); + + +-- public.tenant_info_auth_resources foreign keys + +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkkecsc13ydhwg8u05aumkqbnx1 FOREIGN KEY (tenant_info_id) REFERENCES public.tenant_info(id); +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkpvvec4ju3hsma6s1rtgvr4mf6 FOREIGN KEY (auth_resources_id) REFERENCES public.auth_resource3(id); \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql new file mode 100644 index 0000000..e6335f3 --- /dev/null +++ b/roles/cmoa_demo_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql @@ -0,0 +1,2380 @@ +INSERT INTO public.tenant_info (id, name, in_used, created_date, modified_date, contract_id) VALUES ('DEFAULT_TENANT', 'admin', true, now(), now(), 0); + +INSERT INTO public.auth_resource2 (id, access_type, name, 
parent_id, type) VALUES (-1, 4, 'null', NULL, 'null');

-- Top-level menu entries (parent_id = -1 points at the sentinel root).
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Infrastructure', -1, 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Workloads', -1, 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Services', -1, 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Diagnosis', -1, 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Statistics & Analysis', -1, 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Reports', -1, 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Settings', -1, 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Hosts', -1, 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Dashboards', -1, 'menu');
--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Health Check', -1, 'menu');

-- Infrastructure sub-menus (parent resolved by name lookup on the row above).
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Infrastructure'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Infrastructure'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Namespace', (select id from auth_resource2 where type='menu' and name='Infrastructure'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Nodes', (select id from auth_resource2 where type='menu' and name='Infrastructure'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Node Details', (select id from auth_resource2 where type='menu' and name='Infrastructure'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Usage', (select id from auth_resource2 where type='menu' and name='Infrastructure'), 'menu');
-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Persistent Volume', (select id from auth_resource2 where type='menu' and name='Infrastructure'), 'menu');

-- Workloads sub-menus.
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Pods', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Jobs', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu');
-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Cron Jobs', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Deploy List', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu');

-- Services sub-menus.
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Services'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Structure', (select id from auth_resource2 where type='menu' and name='Services'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id from auth_resource2 where type='menu' and name='Services'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Services'), 'menu');

-- Diagnosis sub-menus.
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Diagnosis'), 'menu');
-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Troubleshooting', (select id from auth_resource2 where type='menu' and name='Diagnosis'), 'menu');

-- Statistics & Analysis sub-menus.
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Performance Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alert History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Job History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Log Viewer', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu');
INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Event Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu');
INSERT INTO public.auth_resource2
(access_type, name, parent_id, type) VALUES (4, 'Alert Analysis', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Container Life Cycle', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Traces', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Used Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'User & Group', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alerts', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'General', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Metric Meta', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); 
+INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Notification', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Host Alerts', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'License', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Agent', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alias', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 
'Group', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'CloudMOA - Nodes Resource', NULL, 'dashboard'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Detail', NULL, 'dashboard'); + +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES(4, 'Check Script', (select id from auth_resource2 where type='menu' and name='Health Check'), 'menu'); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards', false, null); +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Namespace', false, null); +INSERT INTO public.auth_resource3 (name, 
is_deleted, tenant_id) VALUES ('menu|Infrastructure|Nodes', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Node Details', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Resource Usage', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Pods', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Jobs', false, null); +-- INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Cron Jobs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Deploy List', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Structure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis|Anomaly Score', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Performance Trends', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert History', false, null); +INSERT INTO
public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Anomaly Score', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Job History', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Log Viewer', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Event Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Container Life Cycle', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Service Traces', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|User & Group', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|General', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Metric Meta', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Notification', false, null); +INSERT INTO public.auth_resource3 (name, 
is_deleted, tenant_id) VALUES ('menu|Settings|Host Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|License', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alias', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent Installation', false, NULL); + + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Group', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check|Check Script', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('userGroup|admin|default', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin|owner', false, 'DEFAULT_TENANT'); + +INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, 
user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('admin', NULL, true, NULL, 'admin', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin')); +INSERT INTO public.cloud_group (id, created_date, modified_date, name, description) VALUES ((select id from auth_resource3 where name='userGroup|admin|default'), now(), now(), 'default', '기본그룹정의'); + +--INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('owner', NULL, false, NULL, 'owner', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin|owner')); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +--INSERT INTO public.cloud_user_setting +--(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +--VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|CloudMOA - Nodes Resource', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|Service Detail', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('cluster|cloudmoa', false, 'DEFAULT_TENANT'); + 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (select id from auth_resource3 where name='menu|Infrastructure'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (select id from auth_resource3 where name='menu|Infrastructure|Topology'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (select id from auth_resource3 where name='menu|Infrastructure|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (select id from auth_resource3 where name='menu|Infrastructure|Resource Usage'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (select id from auth_resource3 where name='menu|Infrastructure|Namespace'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (11, 'Overview', NULL, 0, 
'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where name='menu|Services|Structure'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", 
url, auth_resource3_id, scope_level) VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (30, 'Diagnosis', '05.Diagnosis', 4, NULL, (select id from auth_resource3 where name='menu|Diagnosis'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (31, 'Anomaly Score Detail', NULL, 0, 'anomalyScoreDiagnosis', (select id from auth_resource3 where name='menu|Diagnosis|Anomaly Score'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (43, 'Alert History', 
NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) 
VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (62, 'Templates', NULL, 1, 'templateReport', (select id from auth_resource3 where name='menu|Reports|Templates'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2); + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (80, 'Hosts', '12.Hosts', 10, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where name='menu|Hosts|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (90, 'Settings', '08.Setting', 99, NULL, (select id from auth_resource3 where name='menu|Settings'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', (select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, 
auth_resource3_id, scope_level) VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (99, 'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2); + +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0); + +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure'), 'owner'); 
+--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Resource Usage'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Namespace'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Nodes'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Node Details'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Deploy List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM 
auth_resource3 WHERE NAME = 'menu|Workloads|Cron Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Pods'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Structure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Detail'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis|Anomaly Score'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, 
(SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Performance Trends'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Anomaly Score'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Job History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Log Viewer'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Event Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Container Life Cycle'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Service Traces'), 'owner'); +-- +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), 
now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Templates'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Templates'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|User & Group'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alerts'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Metric Meta'), 'owner'); +--INSERT INTO public.user_permission2 VALUES 
(nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|General'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Notification'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alias'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|License'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent Installation'), 'owner'); + +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cadvisor_version_info', 'cadvisor', 'A metric with a constant ''1'' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_periods_total', 'cadvisor', 'Number of elapsed enforcement period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('container_cpu_cfs_throttled_periods_total', 'cadvisor', 'Number of throttled period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_seconds_total', 'cadvisor', 'Total time duration the container has been throttled.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_load_average_10s', 'cadvisor', 'Value of container cpu load average over the last 10 seconds.', 'CPU', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_periods_total', 'cadvisor', 'Number of times processes of the cgroup have run on the cpu', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_seconds_total', 'cadvisor', 'Time duration the processes of the container have run on the CPU.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_runqueue_seconds_total', 'cadvisor', 'Time 
duration processes of the container have been waiting on a runqueue.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_system_seconds_total', 'cadvisor', 'Cumulative system cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_usage_seconds_total', 'cadvisor', 'Cumulative cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_user_seconds_total', 'cadvisor', 'Cumulative user cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_limit_bytes', 'cadvisor', 'Number of bytes that can be consumed by the container on this filesystem.', NULL, NULL, 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_last_seen', 'cadvisor', 'Last time a container was seen by the exporter', NULL, NULL, 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_bytes_total', 'cadvisor', 'Cumulative count of bytes received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while receiving', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_total', 'cadvisor', 'Cumulative count of packets received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_bytes_total', 'cadvisor', 'Cumulative count of bytes transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_errors_total', 'cadvisor', 'Cumulative count of errors encountered while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, 
diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_total', 'cadvisor', 'Cumulative count of packets transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_scrape_error', 'cadvisor', '1 if there was an error while getting container metrics, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_period', 'cadvisor', 'CPU period of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_quota', 'cadvisor', 'CPU quota of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_cache', 'cadvisor', 'Number of bytes of page cache memory.', 'Memory', 'LOAD', 'Container', 
'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failcnt', 'cadvisor', 'Number of memory usage hits limits', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failures_total', 'cadvisor', 'Cumulative count of memory allocation failures.', 'Memory', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_max_usage_bytes', 'cadvisor', 'Maximum memory usage recorded in bytes', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_rss', 'cadvisor', 'Size of RSS in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_swap', 'cadvisor', 'Container swap usage in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('container_memory_usage_bytes', 'cadvisor', 'Current memory usage in bytes, including all memory regardless of when it was accessed', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_working_set_bytes', 'cadvisor', 'Current working set in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_tcp_usage_total', 'cadvisor', 'tcp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'tcp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_udp_usage_total', 'cadvisor', 'udp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'udp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_shares', 'cadvisor', 'CPU share of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_limit_bytes', 'cadvisor', 'Memory limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_swap_limit_bytes', 'cadvisor', 'Memory swap limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_start_time_seconds', 'cadvisor', 'Start time of the container since unix epoch in seconds.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_tasks_state', 'cadvisor', 'Number of tasks in given state', NULL, NULL, 'Container', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds', 'prometheus', 'The HTTP request latencies in microseconds.', NULL, 'DURATION', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_sum', 'prometheus', '', NULL, 
NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_requests_total', 'prometheus', 'Total number of scrapes by HTTP status code.', NULL, 'ERROR', 'Node', 'counter', 'code,method', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_bytes_average', 'cloudwatch', 'Bytes read from all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', 
NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds', 'micrometer', 'Server Response in second', NULL, 'RATE', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_count', 'micrometer', 'the total number of requests.', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_sum', 'micrometer', 'the total time taken to serve the requests', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_max', 'micrometer', 'the max number of requests.', NULL, 'RATE', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_bytes_average', 'cloudwatch', 'Bytes written to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('jvm_classes_loaded', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_unloaded_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_live_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_max_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_allocated_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_promoted_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds', 'micrometer', 'jvm 
info', 'GC', 'LOAD', 'Process', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_count', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_max', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_sum', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_arp_entries', 'node_exporter', 'ARP entries by device', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_boot_time_seconds', 'node_exporter', 'Node boot time, in unixtime.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_context_switches_total', 'node_exporter', 'Total number of context switches.', 'OS', 'LOAD', 'Node', 'counter', 
NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_core_throttles_total', 'node_exporter', 'Number of times this cpu core has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'core', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_hertz', 'node_exporter', 'Current cpu thread frequency in hertz.', 'CPU', 'LOAD', 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_max_hertz', 'node_exporter', 'Maximum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_min_hertz', 'node_exporter', 'Minimum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_guest_seconds_total', 'node_exporter', 'Seconds the cpus spent in guests (VMs) for each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('node_cpu_package_throttles_total', 'node_exporter', 'Number of times this cpu package has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'package', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_seconds_total', 'node_exporter', 'Seconds the cpus spent in each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu,mode', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_entropy_available_bits', 'node_exporter', 'Bits of available entropy.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_exporter_build_info', 'node_exporter', 'A metric with a constant ''1'' value labeled by version, revision, branch, and goversion from which node_exporter was built.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_cpuutilization_average', 'cloudwatch', 'The percentage of allocated EC2 compute units that are currently in use on the instance.', 'CPU', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_ops_average', 'cloudwatch', 'Completed read 
operations from all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_ops_average', 'cloudwatch', 'Completed write operations to all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_bytes_average', 'cloudwatch', 'Bytes read from all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_bytes_average', 'cloudwatch', 'Bytes written to all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_in_average', 'cloudwatch', 'The number of bytes received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('aws_ec2_network_out_average', 'cloudwatch', 'The number of bytes sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_readonly', 'node_exporter', 'Filesystem read-only status.', NULL, NULL, 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_in_average', 'cloudwatch', 'The number of packets received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_forks_total', 'node_exporter', 'Total number of forks.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_chip_names', 'node_exporter', 'Annotation metric for human-readable chip names', 'CPU', 'LOAD', 'Node', 'gauge', 'chip', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_fan_rpm', 'node_exporter', 'Hardware monitor for fan revolutions per minute (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_pwm', 'node_exporter', 'Hardware monitor pwm element ', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_sensor_label', 'node_exporter', 'Label for given chip and sensor', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_celsius', 'node_exporter', 'Hardware monitor for temperature (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_alarm_celsius', 'node_exporter', 'Hardware monitor for temperature (crit_alarm)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_celsius', 'node_exporter', 'Hardware monitor for temperature (crit)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_hwmon_temp_max_celsius', 'node_exporter', 'Hardware monitor for temperature (max)', NULL, NULL, 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_intr_total', 'node_exporter', 'Total number of interrupts serviced.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_out_average', 'cloudwatch', 'The number of packets sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_ops_average', 'cloudwatch', 'Completed read operations from all Amazon EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_ops_average', 'cloudwatch', 'Completed write operations to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load1', 'node_exporter', '1m load 
average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load15', 'node_exporter', '15m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load5', 'node_exporter', '5m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_completed_total', 'node_exporter', 'The total number of reads completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_merged_total', 'node_exporter', 'The total number of reads merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_write_time_seconds_total', 'node_exporter', 'This is the total number of seconds spent by all writes.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_disk_writes_completed_total', 'node_exporter', 'The total number of writes completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_writes_merged_total', 'node_exporter', 'The number of writes merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_written_bytes_total', 'node_exporter', 'The total number of bytes written successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries', 'node_exporter', 'Number of currently allocated flow entries for connection tracking.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries_limit', 'node_exporter', 'Maximum size of connection tracking table.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_scrape_collector_duration_seconds', 'node_exporter', 'node_exporter: Duration of a collector scrape.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_scrape_collector_success', 'node_exporter', 'node_exporter: Whether a collector succeeded.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_textfile_scrape_error', 'node_exporter', '1 if there was an error opening or reading a file, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_time_seconds', 'node_exporter', 'System time in seconds since epoch (1970).', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_estimated_error_seconds', 'node_exporter', 'Estimated error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_frequency_adjustment_ratio', 'node_exporter', 'Local clock frequency adjustment.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_timex_loop_time_constant', 'node_exporter', 'Phase-locked loop time constant.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_maxerror_seconds', 'node_exporter', 'Maximum error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_offset_seconds', 'node_exporter', 'Time offset in between local system and reference clock.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_calibration_total', 'node_exporter', 'Pulse per second count of calibration intervals.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_error_total', 'node_exporter', 'Pulse per second count of calibration errors.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_frequency_hertz', 'node_exporter', 'Pulse per second frequency.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_seconds', 'node_exporter', 'Pulse per second jitter.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_total', 'node_exporter', 'Pulse per second count of jitter limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_shift_seconds', 'node_exporter', 'Pulse per second interval duration.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_exceeded_total', 'node_exporter', 'Pulse per second count of stability limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_hertz', 'node_exporter', 'Pulse per second stability, average of recent frequency changes.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_status', 'node_exporter', 'Value of the status array bits.', NULL, 
NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_sync_status', 'node_exporter', 'Is clock synchronized to a reliable server (1 = yes, 0 = no).', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tai_offset_seconds', 'node_exporter', 'International Atomic Time (TAI) offset.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tick_seconds', 'node_exporter', 'Seconds between clock ticks.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_uname_info', 'node_exporter', 'Labeled system information as provided by the uname system call.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_oom_kill', 'node_exporter', '/proc/vmstat information field oom_kill.', NULL, 'ERROR', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('process_cpu_usage', 'micrometer', 'The "recent cpu usage" for the Java Virtual Machine process', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_uptime_seconds', 'micrometer', 'Process uptime in seconds.', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_count', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_max', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_sum', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('system_cpu_usage', 'micrometer', 'The "recent cpu usage" for the whole system', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('system_load_average_1m', 'micrometer', 'The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('up', 'prometheus', '1 if the instance is healthy, i.e. reachable, or 0 if the scrape failed.', NULL, 'ERROR', 'Any', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('go_threads', 'prometheus', 'Number of OS threads created.', 'Thread', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes', 'prometheus', 'The HTTP request sizes in bytes.', 'Network', 'LOAD', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes', 'prometheus', 'The HTTP response sizes in bytes.', 'Network', 'LOAD', 'Node', 
'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_count', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_total_capacity_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_committed_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_max_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_daemon', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_live', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_peak', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_now', 'node_exporter', 'The number of I/Os currently in progress.', 'Disk', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_seconds_total', 'node_exporter', 'Total seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_weighted_seconds_total', 'node_exporter', 'The weighted # of seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_bytes_total', 'node_exporter', 'The total number of bytes read successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_time_seconds_total', 'node_exporter', 'The total number of seconds spent by all reads.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filefd_allocated', 'node_exporter', 'File descriptor statistics: allocated.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filefd_maximum', 'node_exporter', 'File descriptor statistics: maximum.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_connections_total', 'node_exporter', 'The total number of connections made.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_ipvs_incoming_bytes_total', 'node_exporter', 'The total amount of incoming data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_incoming_packets_total', 'node_exporter', 'The total number of incoming packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_bytes_total', 'node_exporter', 'The total amount of outgoing data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_packets_total', 'node_exporter', 'The total number of outgoing packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_anon_bytes', 'node_exporter', 'Memory information field Active_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_bytes', 'node_exporter', 'Memory information field Active_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_file_bytes', 'node_exporter', 'Memory information field Active_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonHugePages_bytes', 'node_exporter', 'Memory information field AnonHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonPages_bytes', 'node_exporter', 'Memory information field AnonPages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Bounce_bytes', 'node_exporter', 'Memory information field Bounce_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Buffers_bytes', 'node_exporter', 'Memory information field Buffers_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Cached_bytes', 'node_exporter', 'Memory 
information field Cached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaFree_bytes', 'node_exporter', 'Memory information field CmaFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaTotal_bytes', 'node_exporter', 'Memory information field CmaTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CommitLimit_bytes', 'node_exporter', 'Memory information field CommitLimit_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Committed_AS_bytes', 'node_exporter', 'Memory information field Committed_AS_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap1G_bytes', 'node_exporter', 'Memory information field DirectMap1G_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap2M_bytes', 'node_exporter', 'Memory information field DirectMap2M_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap4k_bytes', 'node_exporter', 'Memory information field DirectMap4k_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Dirty_bytes', 'node_exporter', 'Memory information field Dirty_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HardwareCorrupted_bytes', 'node_exporter', 'Memory information field HardwareCorrupted_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Free', 'node_exporter', 'Memory information field HugePages_Free.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Rsvd', 'node_exporter', 'Memory information field HugePages_Rsvd.', 'Memory', 
'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Surp', 'node_exporter', 'Memory information field HugePages_Surp.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Total', 'node_exporter', 'Memory information field HugePages_Total.', 'Memory', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Hugepagesize_bytes', 'node_exporter', 'Memory information field Hugepagesize_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_anon_bytes', 'node_exporter', 'Memory information field Inactive_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_bytes', 'node_exporter', 'Memory information field Inactive_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_file_bytes', 'node_exporter', 'Memory information field Inactive_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_KernelStack_bytes', 'node_exporter', 'Memory information field KernelStack_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mapped_bytes', 'node_exporter', 'Memory information field Mapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemAvailable_bytes', 'node_exporter', 'Memory information field MemAvailable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemFree_bytes', 'node_exporter', 'Memory information field MemFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemTotal_bytes', 'node_exporter', 'Memory information field MemTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mlocked_bytes', 'node_exporter', 'Memory information field Mlocked_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_NFS_Unstable_bytes', 'node_exporter', 'Memory information field NFS_Unstable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_PageTables_bytes', 'node_exporter', 'Memory information field PageTables_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Shmem_bytes', 'node_exporter', 'Memory information field Shmem_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_ShmemHugePages_bytes', 'node_exporter', 'Memory information field ShmemHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_memory_ShmemPmdMapped_bytes', 'node_exporter', 'Memory information field ShmemPmdMapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Slab_bytes', 'node_exporter', 'Memory information field Slab_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SReclaimable_bytes', 'node_exporter', 'Memory information field SReclaimable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SUnreclaim_bytes', 'node_exporter', 'Memory information field SUnreclaim_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapCached_bytes', 'node_exporter', 'Memory information field SwapCached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapFree_bytes', 'node_exporter', 'Memory information field SwapFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapTotal_bytes', 'node_exporter', 'Memory information field SwapTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Unevictable_bytes', 'node_exporter', 'Memory information field Unevictable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocChunk_bytes', 'node_exporter', 'Memory information field VmallocChunk_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocTotal_bytes', 'node_exporter', 'Memory information field VmallocTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocUsed_bytes', 'node_exporter', 'Memory information field VmallocUsed_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Writeback_bytes', 
'node_exporter', 'Memory information field Writeback_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_WritebackTmp_bytes', 'node_exporter', 'Memory information field WritebackTmp_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InErrors', 'node_exporter', 'Statistic IcmpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InMsgs', 'node_exporter', 'Statistic IcmpInMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_OutMsgs', 'node_exporter', 'Statistic IcmpOutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InErrors', 'node_exporter', 'Statistic Icmp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InMsgs', 'node_exporter', 'Statistic Icmp6InMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_OutMsgs', 'node_exporter', 'Statistic Icmp6OutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip_Forwarding', 'node_exporter', 'Statistic IpForwarding.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_InOctets', 'node_exporter', 'Statistic Ip6InOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_OutOctets', 'node_exporter', 'Statistic Ip6OutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_InOctets', 'node_exporter', 'Statistic IpExtInOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_OutOctets', 'node_exporter', 'Statistic IpExtOutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_ActiveOpens', 'node_exporter', 'Statistic TcpActiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_CurrEstab', 'node_exporter', 'Statistic TcpCurrEstab.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_InErrs', 'node_exporter', 'Statistic TcpInErrs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_PassiveOpens', 'node_exporter', 'Statistic TcpPassiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_RetransSegs', 'node_exporter', 'Statistic TcpRetransSegs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenDrops', 'node_exporter', 'Statistic TcpExtListenDrops.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenOverflows', 'node_exporter', 'Statistic TcpExtListenOverflows.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesFailed', 'node_exporter', 'Statistic TcpExtSyncookiesFailed.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesRecv', 'node_exporter', 'Statistic TcpExtSyncookiesRecv.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesSent', 'node_exporter', 'Statistic TcpExtSyncookiesSent.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InDatagrams', 
'node_exporter', 'Statistic UdpInDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InErrors', 'node_exporter', 'Statistic UdpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_NoPorts', 'node_exporter', 'Statistic UdpNoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_OutDatagrams', 'node_exporter', 'Statistic UdpOutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InDatagrams', 'node_exporter', 'Statistic Udp6InDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InErrors', 'node_exporter', 'Statistic Udp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('node_netstat_Udp6_NoPorts', 'node_exporter', 'Statistic Udp6NoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_OutDatagrams', 'node_exporter', 'Statistic Udp6OutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite_InErrors', 'node_exporter', 'Statistic UdpLiteInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite6_InErrors', 'node_exporter', 'Statistic UdpLite6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_bytes_total', 'node_exporter', 'Network device statistic receive_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_compressed_total', 'node_exporter', 'Network device statistic receive_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_drop_total', 'node_exporter', 'Network device statistic receive_drop.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_errs_total', 'node_exporter', 'Network device statistic receive_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_fifo_total', 'node_exporter', 'Network device statistic receive_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_frame_total', 'node_exporter', 'Network device statistic receive_frame.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_multicast_total', 'node_exporter', 'Network device statistic receive_multicast.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('node_network_receive_packets_total', 'node_exporter', 'Network device statistic receive_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_bytes_total', 'node_exporter', 'Network device statistic transmit_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_carrier_total', 'node_exporter', 'Network device statistic transmit_carrier.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_colls_total', 'node_exporter', 'Network device statistic transmit_colls.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_compressed_total', 'node_exporter', 'Network device statistic transmit_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_drop_total', 'node_exporter', 'Network device statistic transmit_drop.', 'Network', 'LOAD', 
'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_errs_total', 'node_exporter', 'Network device statistic transmit_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_fifo_total', 'node_exporter', 'Network device statistic transmit_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_packets_total', 'node_exporter', 'Network device statistic transmit_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_blocked', 'node_exporter', 'Number of processes blocked waiting for I/O to complete.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_running', 'node_exporter', 'Number of processes in runnable state.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_inuse', 'node_exporter', 'Number of FRAG sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_memory', 'node_exporter', 'Number of FRAG sockets in state memory.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_RAW_inuse', 'node_exporter', 'Number of RAW sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_sockets_used', 'node_exporter', 'Number of sockets sockets in state used.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_alloc', 'node_exporter', 'Number of TCP sockets in state alloc.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_inuse', 'node_exporter', 'Number of TCP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem', 'node_exporter', 'Number of TCP sockets in state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem_bytes', 'node_exporter', 'Number of TCP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_orphan', 'node_exporter', 'Number of TCP sockets in state orphan.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_tw', 'node_exporter', 'Number of TCP sockets in state tw.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_inuse', 'node_exporter', 'Number of UDP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem', 'node_exporter', 'Number of UDP sockets in 
state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem_bytes', 'node_exporter', 'Number of UDP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDPLITE_inuse', 'node_exporter', 'Number of UDPLITE sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_tcp_connection_states', 'node_exporter', 'Number of connection states.', 'Network', 'LOAD', 'Node', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgfault', 'node_exporter', '/proc/vmstat information field pgfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgmajfault', 'node_exporter', '/proc/vmstat information field pgmajfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, 
keys, created_date, modified_date) VALUES ('node_vmstat_pgpgin', 'node_exporter', '/proc/vmstat information field pgpgin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgpgout', 'node_exporter', '/proc/vmstat information field pgpgout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpin', 'node_exporter', '/proc/vmstat information field pswpin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpout', 'node_exporter', '/proc/vmstat information field pswpout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_files_open', 'micrometer', 'The open file descriptor count', 'File', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_open_fds', 'micrometer', 'Number of open file descriptors.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_resident_memory_bytes', 'micrometer', 'Resident memory size in bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_virtual_memory_bytes', 'micrometer', '-', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_free', 'cadvisor', 'Number of available Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_total', 'cadvisor', 'Number of Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_current', 'cadvisor', 'Number of I/Os currently in progress', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_seconds_total', 'cadvisor', 'Cumulative count of seconds spent doing I/Os', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_weighted_seconds_total', 'cadvisor', 'Cumulative weighted I/O time in seconds', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_read_seconds_total', 'cadvisor', 'Cumulative count of seconds spent reading', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_bytes_total', 'cadvisor', 'Cumulative count of bytes read', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_merged_total', 'cadvisor', 'Cumulative count of reads merged', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_total', 'cadvisor', 'Cumulative count of reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('container_fs_sector_reads_total', 'cadvisor', 'Cumulative count of sector reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_sector_writes_total', 'cadvisor', 'Cumulative count of sector writes completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_usage_bytes', 'cadvisor', 'Number of bytes that are consumed by the container on this filesystem.', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_write_seconds_total', 'cadvisor', 'Cumulative count of seconds spent writing', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_bytes_total', 'cadvisor', 'Cumulative count of bytes written', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_total', 'cadvisor', 'Cumulative count of writes completed', 'Filesystem', 'LOAD', 
'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_avail_bytes', 'node_exporter', 'Filesystem space available to non-root users in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_device_error', 'node_exporter', 'Whether an error occurred while getting statistics for the given device.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files', 'node_exporter', 'Filesystem total file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files_free', 'node_exporter', 'Filesystem total free file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_free_bytes', 'node_exporter', 'Filesystem free space in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_size_bytes', 'node_exporter', 'Filesystem size in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hitrate', 'cassandra_exporter', 'All time cache hit rate', 'Cache', 'LOAD', 'Cassandra', 'gauge', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hits_count', 'cassandra_exporter', 'Total number of cache hits', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_requests_count', 'cassandra_exporter', 'Total number of cache requests', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_client_connectednativeclients', 'cassandra_exporter', 'Number of clients connected to this nodes native protocol server', 'Connection', 'LOAD', 'Cassandra', 'gauge', NULL, '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_failures_count', 'cassandra_exporter', 'Number of transaction failures encountered', 'Request', 'LOAD', 'Cassandra', 
'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_latency_seconds_count', 'cassandra_exporter', 'Number of client requests latency seconds', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_timeouts_count', 'cassandra_exporter', 'Number of timeouts encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_unavailables_count', 'cassandra_exporter', 'Number of unavailable exceptions encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_completedtasks', 'cassandra_exporter', 'Total number of commit log messages written', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_totalcommitlogsize', 'cassandra_exporter', 'Current size, in bytes, used by all the commit log segments', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, 
provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds', 'cassandra_exporter', 'Local range scan latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds_count', 'cassandra_exporter', 'Local range scan count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_readlatency_seconds', 'cassandra_exporter', 'Local read latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_readlatency_seconds_count', 'cassandra_exporter', 'Local read count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused', 'cassandra_exporter', 'Total disk space used belonging to this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('cassandra_keyspace_writelatency_seconds', 'cassandra_exporter', 'Local write latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_writelatency_seconds_count', 'cassandra_exporter', 'Local write count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_activetasks', 'cassandra_exporter', 'Number of tasks being actively worked on', 'Task', 'LOAD', 'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_completedtasks', 'cassandra_exporter', 'Number of tasks completed', 'Task', 'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_pendingtasks', 'cassandra_exporter', 'Number of queued tasks queued up', 'Task', 'LOAD', 'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_totalblockedtasks_count', 'cassandra_exporter', 'Number of tasks that were blocked due to queue saturation', 'Task', 
'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cloudwatch_requests_total', 'cloudwatch', 'API requests made to CloudWatch', 'API', 'LOAD', 'AWS/Usage', 'counter', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_count', 'imxc_api_server', 'the number of error counts in 5s', NULL, 'ERROR', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_total', 'imxc_api_server', 'the total number of errors', NULL, 'ERROR', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_request_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_count', 'imxc_api_server', 'the number of requests counts in 5s', NULL, 'LOAD', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('imxc_service_requests_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'gauge', 'protocol', '2019-12-10 11:22:00', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_total', 'imxc_api_server', 'the total number of requests', NULL, 'LOAD', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_connections', 'mongodb_exporter', 'The number of incoming connections from clients to the database server', 'Connection', 'LOAD', 'MongoDB', 'gauge', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_client', 'mongodb_exporter', 'The number of the active client connections performing read or write operations', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_current_queue', 'mongodb_exporter', 'The number of operations that are currently queued and waiting for the read or write lock', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_instance_uptime_seconds', 'mongodb_exporter', 'The number of seconds that the current MongoDB process has been 
active', 'Server', 'DURATION', 'MongoDB', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_memory', 'mongodb_exporter', 'The amount of memory, in mebibyte (MiB), currently used by the database process', 'Memory', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_metrics_document_total', 'mongodb_exporter', 'The total number of documents processed', 'Row', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_network_bytes_total', 'mongodb_exporter', 'The number of bytes that reflects the amount of network traffic', 'Network', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_op_counters_total', 'mongodb_exporter', 'The total number of operations since the mongod instance last started', 'Request', 'LOAD', 'MongoDB', 'counter', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_aborted_connects', 'mysqld_exporter', 'The number of failed attempts to connect to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_received', 'mysqld_exporter', 'The number of bytes received from all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_sent', 'mysqld_exporter', 'The number of bytes sent to all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_commands_total', 'mysqld_exporter', 'The number of times each XXX command has been executed', 'Request', 'LOAD', 'MySQL', 'counter', 'command', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_connections', 'mysqld_exporter', 'The number of connection attempts (successful or not) to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests', 'mysqld_exporter', 'The number of logical read requests', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('mysql_global_status_innodb_buffer_pool_write_requests', 'mysqld_exporter', 'The number of writes done to the InnoDB buffer pool', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_read', 'mysqld_exporter', 'The amount of data read since the server was started (in bytes)', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_reads', 'mysqld_exporter', 'The total number of data reads (OS file reads)', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_writes', 'mysqld_exporter', 'The total number of data writes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_written', 'mysqld_exporter', 'The amount of data written so far, in bytes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_write_requests', 'mysqld_exporter', 'The number of write requests for the InnoDB redo log', 'Log', 'LOAD', 'MySQL', 'counter', 
'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_writes', 'mysqld_exporter', 'The number of physical writes to the InnoDB redo log file', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_os_log_written', 'mysqld_exporter', 'The number of bytes written to the InnoDB redo log files', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits', 'mysqld_exporter', 'The number of row locks currently being waited for by operations on InnoDB tables', 'Lock', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_time', 'mysqld_exporter', 'The total time spent in acquiring row locks for InnoDB tables, in milliseconds', 'Lock', 'DURATION', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits', 'mysqld_exporter', 'The number of times operations on InnoDB tables had to wait for a row lock', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 
16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_ops_total', 'mysqld_exporter', 'The number of rows operated in InnoDB tables', 'Row', 'LOAD', 'MySQL', 'counter', 'operation', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_table_locks_immediate', 'mysqld_exporter', 'The number of times that a request for a table lock could be granted immediately', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_threads_connected', 'mysqld_exporter', 'The number of currently open connections', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_threads_running', 'mysqld_exporter', 'The number of threads that are not sleeping', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_uptime', 'mysqld_exporter', 'The number of seconds that the server has been up', 'Server', 'DURATION', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('mysql_up', 'mysqld_exporter', 'Whether the last scrape of metrics from MySQL was able to connect to the server', 'NULL', 'ERROR', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_locks_count', 'postgres_exporter', 'Number of locks', 'Lock', 'LOAD', 'PostgreSQL', 'gauge', 'datname,mode', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_activity_count', 'postgres_exporter', 'number of connections in this state', 'Connection', 'LOAD', 'PostgreSQL', 'gauge', 'datname,state', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_read_time', 'postgres_exporter', 'Time spent reading data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_write_time', 'postgres_exporter', 'Time spent writing data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_hit', 'postgres_exporter', 'Number of times disk blocks were found already in the buffer cache', 
'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_read', 'postgres_exporter', 'Number of disk blocks read in this database', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_bytes', 'postgres_exporter', 'Total amount of data written to temporary files by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_files', 'postgres_exporter', 'Number of temporary files created by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_deleted', 'postgres_exporter', 'Number of rows deleted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_fetched', 'postgres_exporter', 'Number of rows fetched by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_inserted', 'postgres_exporter', 'Number of rows inserted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_returned', 'postgres_exporter', 'Number of rows returned by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_updated', 'postgres_exporter', 'Number of rows updated by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_commit', 'postgres_exporter', 'Number of transactions in this database that have been committed', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_rollback', 'postgres_exporter', 'Number of transactions in this database that have been rolled back', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('pg_up', 'postgres_exporter', 'Whether the last scrape of metrics from PostgreSQL was able to connect to the server', 'NULL', 'ERROR', 'PostgreSQL', 'gauge', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816000, '2019-08-19 06:14:22.616', '2019-08-19 06:14:22.616', false, 4, (select id from auth_resource2 where type='menu' and name='Infrastructure' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816001, '2019-08-19 06:14:22.635', '2019-08-19 06:14:22.635', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816002, '2019-08-19 06:14:22.638', '2019-08-19 06:14:22.638', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816003, '2019-08-19 06:14:22.64', '2019-08-19 06:14:22.64', false, 4, (select id from auth_resource2 where type='menu' and name='Namespace' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816004, '2019-08-19 06:14:22.643', '2019-08-19 06:14:22.643', false, 4, (select id from auth_resource2 where type='menu' and name='Nodes' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) 
VALUES (3816005, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Node Details' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816006, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Resource Usage' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816009, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Persistent Volume' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816100, '2019-08-19 06:14:22.619', '2019-08-19 06:14:22.619', false, 4, (select id from auth_resource2 where type='menu' and name='Workloads' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816105, '2019-08-19 06:14:22.657', '2019-08-19 06:14:22.657', false, 4, (select id from auth_resource2 where type='menu' and name='Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816106, '2019-08-19 06:14:22.66', '2019-08-19 06:14:22.66', false, 4, (select id from auth_resource2 where type='menu' and name='Cron Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816107, '2019-08-19 06:14:22.646', '2019-08-19 06:14:22.646', false, 4, (select id from auth_resource2 where type='menu' and name='Pods' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, 
auth_resource_id, user_id) VALUES (3816200, '2019-08-19 06:14:22.621', '2019-08-19 06:14:22.621', false, 4, (select id from auth_resource2 where type='menu' and name='Services' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816201, '2019-08-19 06:14:22.698', '2019-08-19 06:14:22.698', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816202, '2019-08-19 06:14:22.728', '2019-08-19 06:14:22.728', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816203, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816300, '2019-08-19 06:14:22.624', '2019-08-19 06:14:22.624', false, 4, (select id from auth_resource2 where type='menu' and name='Diagnosis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816301, '2019-08-19 06:14:22.705', '2019-08-19 06:14:22.705', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Diagnosis') ) , 'admin'); +-- INSERT INTO 
public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816309, '2019-08-19 06:14:22.668', '2019-08-19 06:14:22.668', false, 4, (select id from auth_resource2 where type='menu' and name='Troubleshooting' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816400, '2019-08-19 06:14:22.627', '2019-08-19 06:14:22.627', false, 4, (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816401, '2019-08-19 06:14:22.671', '2019-08-19 06:14:22.671', false, 4, (select id from auth_resource2 where type='menu' and name='Performance Trends' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816402, '2019-08-19 06:14:22.731', '2019-08-19 06:14:22.731', false, 4, (select id from auth_resource2 where type='menu' and name='Alert Analysis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816403, '2019-08-19 06:14:22.674', '2019-08-19 06:14:22.674', false, 4, (select id from auth_resource2 where type='menu' and name='Alert History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816404, '2019-08-19 06:14:22.677', '2019-08-19 06:14:22.677', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816405, 
'2019-08-19 06:14:22.679', '2019-08-19 06:14:22.679', false, 4, (select id from auth_resource2 where type='menu' and name='Job History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816406, '2019-08-19 06:14:22.685', '2019-08-19 06:14:22.685', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816407, '2019-08-19 06:14:22.682', '2019-08-19 06:14:22.682', false, 4, (select id from auth_resource2 where type='menu' and name='Log Viewer' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816408, '2019-08-19 06:14:22.725', '2019-08-19 06:14:22.725', false, 4, (select id from auth_resource2 where type='menu' and name='Event Logs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816409, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Container Life Cycle' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816500, '2019-08-19 06:14:22.629', '2019-08-19 06:14:22.629', false, 4, (select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816501, '2019-08-19 06:14:22.734', 
'2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816502, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816550, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Dashboards' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816551, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816552, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816700, '2019-08-19 06:14:22.632', '2019-08-19 06:14:22.632', false, 4, (select id from auth_resource2 where type='menu' and name='Settings' ) , 'admin'); +-- INSERT INTO 
public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816701, '2019-08-19 06:14:22.687', '2019-08-19 06:14:22.687', false, 4, (select id from auth_resource2 where type='menu' and name='User & Group' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816702, '2019-08-19 06:14:22.69', '2019-08-19 06:14:22.69', false, 4, (select id from auth_resource2 where type='menu' and name='Alert' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816703, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Host Alerts' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816704, '2019-08-19 06:14:22.693', '2019-08-19 06:14:22.693', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Settings' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816706, '2019-08-19 06:14:22.717', '2019-08-19 06:14:22.717', false, 4, (select id from auth_resource2 where type='menu' and name='Metric Meta' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816707, '2019-08-19 06:14:22.696', '2019-08-19 06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='Notification' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816708, '2019-08-19 06:14:22.696', '2019-08-19 
06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='General' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816709, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='License' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816800, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Hosts' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816801, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816802, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816803, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='List' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816804, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id 
from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816805, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Group' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); + + + + +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (97, '2019-04-02 18:07:31.319', '2019-04-02 18:07:31.319', 'NODE CPU 사용', '(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m])) * 100))', 'Node CPU Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id }} CPU 사용률이 {threshold}%를 초과했습니다. 현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (1, '2019-04-15 02:26:13.826', '2019-04-15 02:26:24.02', 'NODE Disk 사용', '(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', {filter} }))) * 100', 'Node Disk Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Disk 사용률이 {threshold}%를 초과했습니다. 
현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (119, '2019-04-02 18:08:50.17', '2019-04-02 18:08:50.17', 'NODE Memory 사용', '(1- ((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node''})) * 100', 'Node Memory Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Memory 사용률이 {threshold}%를 초과했습니다. 현재값 : {{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (2, '2019-04-15 05:27:56.544', '2019-04-15 05:27:59.924', 'Container CPU 사용', 'sum (rate (container_cpu_usage_seconds_total{ {filter} }[1m])) by (xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id) * 100', 'Container CPU Usage', 'controller', 'Cluster:{{$labels.xm_clst_id }} POD:{{$labels.xm_pod_id}} CPU 사용률이 {threshold}%를 초과했습니다. 
현재값:{{humanize $value}}%'); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_user','Container CPU User (%)','Container CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_working_set_bytes','Container Memory Working Set (GiB)','Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_working_set_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_io_seconds','Host io Disk seconds','Host disk io seconds','sum by (instance) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Disk IO Seconds:{{humanize $value}}|{threshold}.','2020-03-23 04:08:37.359','2020-03-23 04:08:37.359'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_write_byte','host disk R/W byte','host disk R/W byte','sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Read/Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2020-03-24 05:21:53.915','2020-03-24 05:24:52.674'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_free','Host Memory Free (GiB)','Memory information field MemFree_bytes','(node_memory_MemAvailable_bytes{{filter}} or (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:18.977','2020-03-23 04:08:18.977'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_sent','Number of Bytes Sent','The number of bytes sent to all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_sent[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Sent:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_namespace','Containe memory sum by namespace','Containe memory sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','memory','Namespace',NULL,false,false,'Container memory sum by namespace','2020-07-03 04:31:10.079','2020-07-03 08:38:17.034'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_count','Node Count','node count','count by(xm_clst_id, xm_namespace,xm_node_id) (up{{filter}})','Node','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} NODE:{{$labels.xm_node_id}} Node Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_restart_count','Container Restart Count','container restart count group by namespace','sum by(xm_clst_id, xm_namespace, pod_name ) (increase(imxc_kubernetes_container_restart_count{{filter}}[10s]))','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container Restart Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_usage','Node CPU Usage (%)','NODE CPU Usage','(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0) * 100)))','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency_device','Node Disk Read Latency per Device (ms)','Node Disk Read Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage_per_device','Node Filesystem Usage per device (%)','NODE Filesystem Usage per Device','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_memory_usage','Node Memory Usage (%)','Node Memory Usage','sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_tablespace_size','Tablespace Size (GiB)','Generic counter metric of tablespaces bytes in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, tablespace, type) (oracledb_tablespace_bytes) / 1073741824','Tablespace','OracleDB','tablespace, type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Tablespace Size:{{humanize $value}}GiB|{threshold}GiB.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_allocated_size','Allocated Memory (MiB)','The total amount of memory that the Redis allocator allocated','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_allocated_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Allocated Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_kubernetes_event_count','Cluster events count','Kubernetes Namespace Events count','sum by (xm_clst_id, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Event Count:{{humanize $value}}|{threshold}.','2019-09-26 05:33:37.000','2020-04-27 05:38:47.804'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_limit','cluster_memory_limit (Gib)','Total container limit size in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Limits:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_total_count','Cluster Pod Total Count','Cluster Pod Total Count','sum by (xm_clst_id) (imxc_kubernetes_controller_counts{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Total Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_free','Host Swap Memory Free','Host Swap Free','node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:24.594','2020-03-23 04:08:24.594'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_context_switch_count','Host Context','Total number of context switches.','sum by (instance) (node_context_switches_total{{filter}})','CPU','Host',NULL,false,false,'None','2020-03-23 04:08:15.000','2020-03-23 04:08:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_used','Host system Filesystem used','Host File system used','sum by (instance) (node_filesystem_size_bytes{{filter}}-node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:30.407','2020-03-23 04:08:30.407'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_io','Node Disk I/O','Total seconds spent doing I/Os','avg by (xm_clst_id, xm_node_id) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:55.992'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage','Container Filesystem Usage (%)','Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_reads','Container Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_namespace','Container cpu sum by namespace','Container cpu sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Namespace',NULL,false,false,'.','2020-05-30 08:30:10.158','2020-06-09 02:00:50.856'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size','Node Filesystem Available Size (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', 
{filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_running_count','Node Pod Running Count','Node Pod Running Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Running Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-06 08:02:40.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_user','Pod CPU User (%)','Pod CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_reads','Pod Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Read Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_max_usage_bytes','Pod Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_max_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Max Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_receive','Pod Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Receive:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hits_count','Total number of cache hits (count/s)','Total number of cache hits','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_hits_count{{filter}}[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Counts per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:24:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_clientrequest_failures_count','Number of transaction failures encountered','Number of transaction failures encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_failures_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Failure Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_connections_and_tasks','Cassandra connections & tasks','cassandra connections & tasks','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "data_type", "Active tasks", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "data_type", "Pending tasks", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "data_type", "Client connections", "", "") )','Connection','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Connections and Tasks:{{humanize $value}}|{threshold}.','2020-01-02 09:11:48.000','2020-02-13 01:24:51.522'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_transmit','Pod Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Transmit:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 
03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_request','cluster_memory_request (Gib)','Total container memory request in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_count','Local read count (count/s)','Local read count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_readlatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_cpu{{filter}})','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Cluster CPU Capacity Cores:{{humanize $value}}|{threshold}.','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_alerts_received_count','Cluster alerts received count','Alert count by cluster','sum by (xm_clst_id, level) (ceil(increase(imxc_alerts_received_count_total{status=''firing'', 
{filter}}[10m])))','Alert','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Alert Received Counts:{{humanize $value}}|{threshold}.','2019-08-23 04:41:49.000','2020-04-28 08:09:09.429'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_throttled_time','Container CPU Throttled Time','container cpu_throttled time','sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) (increase(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="", {filter}}[10s]))','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hitrate','All time cache hit rate','All time cache hit rate','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (cassandra_cache_hitrate {{filter}} * 100)','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-12-13 01:19:54.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_read_bytes','Bytes Read from All Instance Store Volumes (KiB)','Bytes read from all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_read_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_write_bytes','Bytes Written to All Instance Store Volumes (KiB)','Bytes written to all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_write_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebswrite_bytes','Bytes written to all EBS volumes (KiB)','Bytes written to all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebswrite_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_requests_count','Total number of cache requests (count/s)','Total number of cache requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_requests_count[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_keyspace_write_latency','Local write latency (ms)','Local write latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_usage','Cluster Memory Usage (%)','All Nodes Memory Usage in cluster.','(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100','Memory','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-07-18 06:12:22.000','2020-04-22 04:59:14.251'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections_metrics_created_total','Incoming Connections Created','Count of all incoming connections created to the server (This number includes connections that have since closed)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_connections_metrics_created_total[1m]))','Connection','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Incoming Connections Created Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_disk_io','MySQL Disk I/O','MySQL Disk I/O','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_data_read[1m]), "data_type", "read", "", "") or +label_replace(rate(mysql_global_status_innodb_data_written[1m]), "data_type", "written", "", ""))','Disk','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} SVC:{{$labels.xm_service_name}} Mysql Disk IO:{{humanize $value}}|{threshold}.','2019-12-05 08:48:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_capacity_count','Cluster Pod Capacity Count','Cluster Pod Capacity Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Capacity Pod Counts:{{humanize $value}}|{threshold}.','2019-08-27 04:45:52.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_kubernetes_event_count','Namespace events count','Kubernetes Namespace Events count','sum by (xm_clst_id, xm_namespace, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Events:{{humanize $value}}|{threshold}.','2019-09-24 06:42:09.000','2019-09-24 06:42:34.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_capacity_cores','node_cpu_capacity_cores','node_cpu_capacity_cores','imxc_kubernetes_node_resource_capacity_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_allocatable_cores','node_cpu_allocatable_cores','node_cpu_allocatable_cores','imxc_kubernetes_node_resource_allocatable_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_capacity_count','Node Pod Capacity Count','Node Pod Capacity Count','imxc_kubernetes_node_resource_capacity_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Capacity Count of Pods:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_allocatable','node_memory_allocatable (Gib)','imxc_kubernetes_node_resource_allocatable_memory in GiB','imxc_kubernetes_node_resource_allocatable_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_limit','node_memory_limit (Gib)','Total container memory limit for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 
1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_readwritelatency_seconds','Cassandra Read/Write Latency (ms)','Cassandra Read/Write Latency (ms)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) or (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Keyspace Readwritelatency Seconds:{{humanize $value}}ms|{threshold}ms.','2019-10-23 01:46:07.000','2019-11-05 09:03:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_usage','Cluster CPU Usage (%)','All Nodes CPU Usage in cluster.','(100 - (avg by (xm_clst_id)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0)) * 100))','CPU','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-07-18 05:54:39.000','2020-04-22 04:59:14.253'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_received','Number of Bytes Received','The number of bytes received from all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_received[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Received:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 
16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_request','node_memory_request (Gib)','Total container memory request in GiB for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_tasks','Number of tasks','Number of tasks','sum by (task_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "task_type", "active", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "task_type", "pending", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "task_type", "connected", "", "") )','Task','Cassandra','task_type',true,false,'Number of tasks','2019-10-24 01:34:25.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_latency_seconds','Local latency seconds','Local latency seconds','sum by(type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(cassandra_keyspace_readlatency_seconds{quantile=''0.99'', {filter}}, "type", "read", "", "") or +label_replace(cassandra_keyspace_writelatency_seconds{quantile=''0.99'', {filter}}, "type", "write", "", "")) * 1000','Disk','Cassandra',NULL,true,true,'Local latency seconds','2019-10-24 02:14:45.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_concurrency','Wait-Time - Concurrency','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_concurrency[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Concurrency:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_pendingtasks','Number of queued tasks queued up','Number of queued tasks queued up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_pendingtasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Active Task:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_ready_count','Cluster Pod Ready Count','Cluster Pod Ready Count','sum by (xm_clst_id) (imxc_kubernetes_controller_ready{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Ready Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_allocatable_count','Node Pod Allocatable Count','Node Pod Allocatable Count','imxc_kubernetes_node_resource_allocatable_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} 
Allocatable Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_conatiner_count','Container Type Sparselog Count','Container-type sparse log count by xm_clst_id, xm_namespace, xm_node_id, xm_pod_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_namespace, xm_node_id, xm_pod_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Pod",{filter}}[1m])))','SparseLog','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_connected','Number of Open Connections','The number of currently open connections','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_connected)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Open Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebsread_bytes','Bytes read from all EBS volumes (KiB)','Bytes read from all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebsread_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 
17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_cpu_usage','Namespace CPU Usage (%)','CPU Usage by namespace','sum by (xm_clst_id,xm_entity_type,xm_namespace) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'', {filter}}[1m])) * 100','CPU','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} CPU Utilization:{{humanize $value}}%|{threshold}%','2019-08-23 01:06:05.000','2019-08-23 01:06:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_memory_usage','Namespace memory usage (GiB)','Memory usage by namespace in bytes / 1073741824','sum by (xm_clst_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'', {filter}}) / 1073741824','Memory','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Memory Utilization:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 01:21:31.000','2019-08-23 01:21:31.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_free','Node Memory Free (GiB)','Memory information field MemFree_bytes / 1073741824','node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_cached','Node Swap Memory Cached (GiB)','Memory information field SwapCached_bytes /
1073741824','node_memory_SwapCached_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Cached Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_active_size','Active Memory (MiB)','The total amount of active memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_active_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Active Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_up','MySQL Up Count','Whether the last scrape of metrics from MySQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_up)','Instance','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Up counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_up','Oracle DB Up Count','Whether the Oracle database server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_up)','Instance','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle DB Up Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_process_count','Process Count','Gauge metric with count of processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_process_count)','Process','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Process Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_locks_count','Number of Locks','Number of locks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, mode) (pg_locks_count)','Lock','PostgreSQL','datname,mode',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Lock Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_updated','Number of Rows Updated','Number of rows updated by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_updated[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Updated Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_deleted','Number of Rows Deleted','Number of rows deleted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname)
(rate(pg_stat_database_tup_deleted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Deleted Row counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_files','Number of Temporary Files Created','Number of temporary files created by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_files[1m]))','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load15','Node CPU Load 15m Average','Node CPU 15m load average','node_load15{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 15m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:27:39.000','2019-05-15 08:27:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_throttling','Node CPU Throttling','Number of times this cpu package has been throttled.','increase(node_cpu_package_throttles_total{xm_entity_type=''Node'',{filter}}[1m])','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Throttling Counts:{{humanize $value}}|{threshold}.','2019-05-15 08:29:24.000','2019-05-15 08:29:24.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_usage','Pod CPU Usage (%)','Pod CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_system','Pod CPU System (%)','Pod CPU Usage (System)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage_bytes','Pod Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Used Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_limit_bytes','Pod Filesystem Limit Bytes (GiB)','Number of 
bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Limit Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load5','Node CPU Load 5m Average','Node CPU 5m load average','node_load5{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 5m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:26:07.000','2019-05-15 08:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_client_connectednativeclients','Number of Client Connections','Number of clients connected to this nodes native protocol server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_client_connectednativeclients)','Connection','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-07 11:59:04.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_activetasks','Number of tasks being actively worked on','Number of tasks being actively worked on','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_activetasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize 
$value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cloudwatch_requests_count','API requests made to CloudWatch','API requests made to CloudWatch','sum by (xm_clst_id, namespace, action) (rate(cloudwatch_requests_total{{filter}}[10m]))','Request','AWS/Usage',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.namespace}} CloudWatch API Call Volume:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_out','Bytes Sent Out on All Network Interfaces (KiB)','The number of bytes sent out on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_out_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_in','Bytes Received on All Network Interfaces (KiB)','The number of bytes received on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_in_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_count','Namespace Pod Count','Pod count by namespace','count (sum (container_last_seen{{filter}}) by (xm_clst_id, xm_namespace, xm_pod_id)) by (xm_clst_id, xm_namespace)','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Pod Counts:{{humanize $value}}|{threshold}.','2019-08-22 16:53:32.000','2019-08-23 01:06:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage','Node Filesystem Usage (%)','NODE Filesystem Usage','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_available','Node Memory Available (GiB)','Memory information field MemAvailable_bytes / 1073741824','node_memory_MemAvailable_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Avail Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_total','Node Memory Total (GiB)','Memory information field MemTotal_bytes
/ 1073741824','node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive','Node Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:07:46.000','2019-05-31 17:45:22.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit','Node Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:09:05.000','2019-05-31 17:46:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_allocated_count','Cluster Pod Allocated Count','Cluster Pod Allocated Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_allocatable_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Allocated Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_desired_count','Cluster Pod Desired Count','Cluster pod desired count by controller','sum by (xm_clst_id) (imxc_kubernetes_controller_replicas{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Desired Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 02:26:55.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_commands_total','Number of Commands Executed','The number of times each XXX command has been executed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, command) (rate(mysql_global_status_commands_total[1m]) > 0)','Request','MySQL','command',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Commands Executed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-12 08:20:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_running','Number of Threads Running','The number of threads that are not sleeping','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_running)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_dbname_state','Count by dbname and state in pg','count by dbname and state in pg','sum by (xm_clst_id, xm_namespace, 
xm_node_id, instance, state) (pg_stat_activity_count)','Connection','PostgreSQL','state',true,false,'count by dbname and state in pg','2020-01-30 06:10:54.000','2020-01-31 11:33:41.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_alerts_received_count','Namespace alerts received count','Alert count by namespace','sum by (xm_clst_id, xm_namespace, level) (floor(increase(imxc_alerts_received_count_total{status=''firing'', {filter}}[10m])))','Alert','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Alert Count:{{humanize $value}}|{threshold}.','2019-08-23 04:43:29.000','2019-08-23 04:43:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_reads_count_device','Node Disk Reads Count per Device (IOPS)','Node Disk Reads Count per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_reads_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Reads Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency','Node Disk Read Latency (ms)','Node Disk Read Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-05-20 10:59:07.000','2019-05-31 17:46:54.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency_device','Node Disk Write Latency per Device (ms)','Node Disk Write Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes','Node Disk Write Bytes (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size_device','Node Filesystem Available Size per Device (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size_device','Node Filesystem Free Size per Device (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size_device','Node Filesystem Total Size per Device (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_free','Node Swap Memory Free (GiB)','Memory information field SwapFree_bytes / 1073741824','node_memory_SwapFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_total','Node Swap Memory Total (GiB)','Memory information field SwapTotal_bytes / 1073741824','node_memory_SwapTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_up','PostgreSQL Up Count','Whether the last scrape of metrics from PostgreSQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (pg_up)','Instance','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Instance Count:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_write_requests','Number of Writes to Buffer Pool','The number of writes done to the InnoDB buffer pool','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_write_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Writes to Buffer Pool Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests','Number of Logical Read Requests','The number of logical 
read requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_read_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Logical Read Requests Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_read','Amount of Data Read','The amount of data read since the server was started (in bytes)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_data_read[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Read Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_os_log_written','Number of Bytes Written to Redo Log','The number of bytes written to the InnoDB redo log files','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_os_log_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Written to Redo Log Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_written','Amount of Data Written','The amount of data written so far, in bytes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(mysql_global_status_innodb_data_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Written Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pod','Container Memory Request/Limits vs Used by Pod','container_memory_sum_by_pod','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,true,false,'Container memory sum by pod (limit, request, used)','2020-07-22 21:44:33.000','2020-07-22 21:44:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_cache_hit_ratio','Buffer Cache Hit Ratio','Buffer Cache Hit Ratio','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ( +(1 - increase(mysql_global_status_innodb_buffer_pool_reads [1h]) / increase(mysql_global_status_innodb_buffer_pool_read_requests [1h])) * 100)','Block','MySQL',NULL,true,false,'.','2019-12-05 07:47:50.000','2019-12-13 01:17:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_cluster','Container CPU Request/Limits vs Used by Cluster','Container cpu sum by cluster (capacity, limit, request, usage)','sum by(xm_clst_id, data_type) ( 
+label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} *0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})*0.001, "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})*0.001, "data_type", "request", "" , "") or +label_replace(sum by(xm_clst_id)(rate(container_cpu_usage_seconds_total{{filter}}[1m])), "data_type", "used", "" , ""))','CPU','Cluster',NULL,true,false,'Container cpu sum by cluster','2020-07-22 17:49:53.000','2020-07-22 17:49:53.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size','Node Filesystem Total Size (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size','Node Filesystem Free Size (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('container_cpu_sum_by_pod','Container CPU Request/Limits vs Used by Pod','Container cpu sum by pod (capacity, limit, request, usage)','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type)( +label_replace (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "", "") or +label_replace (imxc_kubernetes_container_resource_limit_cpu{{filter}}*0.001, "data_type", "limit", "", "") or +label_replace (imxc_kubernetes_container_resource_request_cpu{{filter}}*0.001, "data_type", "request", "", "") +)','CPU','Pod',NULL,true,false,'Container cpu sum by Pod','2020-07-22 21:37:45.000','2020-07-22 21:37:45.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_lockmode','Count_by_lockmode','Count by lockmode','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, mode) (pg_locks_count)','Lock','PostgreSQL','mode',true,false,'Count by lockmode','2020-01-30 07:06:13.000','2020-01-30 07:06:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits','Number of Row Locks ','The number of row locks currently being waited for by operations on InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_innodb_row_lock_current_waits)','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_capacity','cluster_memory_capacity 
(Gib)','imxc_kubernetes_node_resource_capacity_memory','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Capacity:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:46:58.000','2020-05-27 09:05:56.427'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_free','Host system Filesystem free','Host File system free','sum by (instance) (node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Free Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:29.025','2020-03-23 04:08:29.025'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total','Host system Filesystem total','Host File system total','sum by (instance) (node_filesystem_size_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Total Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:27.634','2020-03-23 04:08:27.634'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_used','Host Swap Memory Used','Host Swap Used','node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Used Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:26.169','2020-03-23 04:08:26.169'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes_device','Node Disk Read Bytes per Device (KiB)','The total number 
of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes','Node Disk Read Bytes (KiB)','The total number of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_rollback','Number of Transactions Rolled Back','Number of transactions in this database that have been rolled back','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_xact_rollback[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Rollback Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_commit','Number of Transactions Committed','Number of transactions in this database that have been committed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_xact_commit[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Commit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_ops_total','Number of Rows Operated','The number of rows operated in InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, operation) (rate(mysql_global_status_innodb_row_ops_total[1m]))','Row','MySQL','operation',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Rows Operated Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_immediate','Number of Table Lock Immediate','The number of times that a request for a table lock could be granted immediately','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_immediate[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Immediate Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_count','Local range scan count (count/s)','Local range scan count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) 
(rate(cassandra_keyspace_rangelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_waited','Number of Table Lock Waited','The number of times that a request for a table lock could not be granted immediately and a wait was needed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_waited[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Waited Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_time','Time Spent Reading Data File Blocks (ms)','Time spent reading data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blk_read_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_write_time','Time Spent Writing Data File Blocks (ms)','Time spent writing data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_blk_write_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Write Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_read','Number of Disk Blocks Read','Number of disk blocks read in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_read[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_hit','Number of Block Cache Hit','Number of times disk blocks were found already in the buffer cache','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_hit[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Hit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_activity_count','Number of Client Connections','number of connections in this state','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, state) (pg_stat_activity_count{{filter}})','Connection','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Connection Counts:{{humanize 
$value}}|{threshold}.','2019-08-27 15:49:21.000','2019-11-18 04:16:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_fetched','Number of Rows Fetched','Number of rows fetched by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_fetched[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Fetched Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_inserted','Number of Rows Inserted','Number of rows inserted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_inserted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Inserted Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_latency','Local range scan latency (ms)','Local range scan latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_rangelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_size','Size used by commit log segments (KiB/s)','Current size, in bytes, used by all the commit log segments / 1024','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_totalcommitlogsize{{filter}}[1m])) / 1024','Log','Cassandra',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Volume:{{humanize $value}}KiB/s|{threshold}KiB/s.','2019-10-02 10:17:01.000','2019-11-05 08:07:03.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_messages','Number of commit log messages written (count/s)','Total number of commit log messages written','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_completedtasks[1m]))','Log','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Message per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_count','Number of client requests (count/s)','Number of client requests by request type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_latency_seconds_count{{filter}}[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Client Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:04:25.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_active','Node Memory Active (GiB)','Memory information field Active_bytes in GiB','node_memory_Active_bytes{xm_entity_type=''Node'', {filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Active Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_returned','Number of Rows Returned','Number of rows returned by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_returned[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Returned Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_write_count','Local write count (count/s)','Local write count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_writelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_cluster','Container Memory Request/Limits vs Used by Cluster','Container memory sum by cluster','sum by (xm_clst_id, 
data_type)( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity", "" , "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "", "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "", "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Cluster',NULL,true,false,'Container memory sum by cluster','2020-07-22 21:23:15.000','2020-07-22 21:23:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_capacity','node_memory_capacity (Gib)','node memory capacity in GiB','imxc_kubernetes_node_resource_capacity_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:46:58.000','2019-08-23 08:46:58.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_request_cores','cluster_cpu_request_cores','cluster_cpu_request_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_request_cores','node_cpu_request_cores','node_cpu_request_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_limit_cores','cluster_cpu_limit_cores','cluster_cpu_limit_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_limit_cores','node_cpu_limit_cores','node_cpu_limit_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_unavailables_count','Number of unavailable exceptions encountered','Number of unavailable exceptions encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_unavailables_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Unavailable Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_up','Cassandra Up Count','Whether the last scrape of metrics from Cassandra was able to connect to the server','count by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_bufferpool_size{{filter}})','Instance','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Instances:{{humanize 
$value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 17:01:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_up','MongoDB Up Count','The number of seconds that the current MongoDB process has been active','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_instance_uptime_seconds[1m]))','Instance','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Up Count Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_current_queue','Number of Operations Waiting','The number of operations that are currently queued and waiting for the read or write lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_current_queue)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Waiting Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_client','Number of Active Client','The number of the active client connections performing read or write operations','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_client)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Active Client Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_metrics_document_total','Number of Documents Processed','The total number of documents processed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_metrics_document_total[1m]))','Row','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Documents Processed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused','Total disk space used (GiB)','Total disk space used belonging to this keyspace / 1073741824','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_totaldiskspaceused {{filter}}) / 1073741824','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Disk Space:{{humanize $value}}GiB|{threshold}GiB.','2019-10-02 10:17:01.000','2019-11-07 01:14:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_latency','Local read latency (ms)','Local read latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_totalblockedtasks','Number of tasks that were blocked (count/s)','Number of tasks that were blocked due to queue saturation in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_totalblockedtasks_count[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Blocked Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_completedtasks','Number of tasks completed (count/s)','Number of tasks completed in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_completedtasks{{filter}}[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Pending Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-05 08:08:57.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_memory','Amount of Memory, in MebiByte','The amount of memory, in mebibyte (MiB), currently used by the database process','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_memory)','Memory','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Memory:{{humanize $value}}MiB|{threshold}MiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_resource_utilization','Resource Usage','Gauge metric with resource utilization','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) (oracledb_resource_current_utilization)','Resource','OracleDB','resource_name',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Resource Usage:{{humanize $value}}%|{threshold}%.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_timeouts_count','Number of timeouts encountered','Number of timeouts encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_timeouts_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Timeout Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_network_bytes_total','Amount of Network Traffic','The number of bytes that reflects the amount of network traffic','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_network_bytes_total[1m]))','Network','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Network Traffic Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_op_counters_total','Number 
of Operations','The total number of operations since the mongod instance last started','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (rate(mongodb_op_counters_total[1m]))','Request','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits','Number of Waits for Row Locks','The number of times operations on InnoDB tables had to wait for a row lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_row_lock_waits[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Waits for Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_execute_count','Execute Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_execute_count[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Execute Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_commits','User Commits','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(oracledb_activity_user_commits[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_parse_count','Parse Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_parse_count_total[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Parse Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_rollbacks','User Rollbacks','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_user_rollbacks[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Rollback:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_writes','Pod Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 
05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage','Pod Memory Usage (%)','Pod Memory Usage Compared to Limit','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024)','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Utillization:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage_bytes','Pod Memory Used (GiB)','Current memory usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_cache_hit_ratio','Buffer Cache Hit Ratio (%)','Number of Block Cache Hit / (Number of Block Cache Hit & Blocks Reads) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (increase(pg_stat_database_blks_hit[1h]) / (increase(pg_stat_database_blks_read[1h]) + increase(pg_stat_database_blks_hit[1h])) * 100)','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL 
Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-08-27 15:49:21.000','2019-12-13 01:33:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_other','Wait-Time - Other','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_other[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Other:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_configuration','Wait-Time - Configuration','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_configuration[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Configuration{{humanize $value}}|{threshold}','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_commit','Wait-Time - Commit','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_commit[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_scheduler','Wait-Time - Scheduler','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_scheduler[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Scheduler:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_system_io','Wait-Time - System I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_system_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - System I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_user_io','Wait-Time - User I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_user_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - User I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_network','Wait-Time - Network','Generic counter metric from 
v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_network[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Network:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_blocked_clients','Blocked Clients','Number of clients pending on a blocking call (BLPOP, BRPOP, BRPOPLPUSH)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_blocked_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Blocked Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connected_clients','Connected Clients','Number of client connections (excluding connections from replicas)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_connected_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Connected Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connections_received','Received Connections','Total number of connections accepted by the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_connections_received_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Received Connections:{{humanize 
$value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_rejected_connections','Rejected Connections','Number of connections rejected because of maxclients limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_rejected_connections_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Rejected Connections:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_up','Redis Up Count','Whether the Redis server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_up)','Instance','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Up Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_total','Call Count / Command','Total number of calls per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_total[1m]))','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Call Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_processed','Processed Commands','Total number of commands processed by the server','sum by (xm_clst_id, 
xm_namespace, xm_node_id, instance) (rate(redis_commands_processed_total[1m]))','Request','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace:}} Redis Processed Commands:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_key_hit_raito','Redis key hit raito','redis key hit raito','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_keyspace_hits_total [1m]), "data_type", "hits", "" , "") or +label_replace(rate(redis_keyspace_misses_total [1m]), "data_type", "misses", "" , "") )','Keyspace','Redis','data_type',true,false,'redis key hit raito','2020-01-29 02:28:03.000','2020-02-13 00:46:27.568'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_net_byte_total','Network byte','Network byte','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_net_input_bytes_total [1m]), "data_type", "input", "", "") or +label_replace(rate(redis_net_output_bytes_total [1m]), "data_type", "output", "", ""))','Network','PostgreSQL','data_type',true,false,'Network byte','2020-01-30 07:22:12.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_cache','Pod Memory Cache (GiB)','Number of bytes of page cache memory / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_cache{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Cache Memory:{{humanize 
$value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_swap','Pod Memory Swap (GiB)','Pod swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_swap{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_total','Oracledb wait time total','oracledb wait time total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_wait_time_scheduler[1m]), "data_type", "scheduler", "", "") or +label_replace(rate(oracledb_wait_time_commit[1m]), "data_type", "commit", "", "") or +label_replace(rate(oracledb_wait_time_network[1m]), "data_type", "network", "", "") or +label_replace(rate(oracledb_wait_time_concurrency[1m]), "data_type", "concurrency", "", "") or +label_replace(rate(oracledb_wait_time_configuration[1m]), "data_type", "configuration", "", "") or +label_replace(rate(oracledb_wait_time_user_io[1m]), "data_type", "user_io", "", "") or +label_replace(rate(oracledb_wait_time_system_io[1m]), "data_type", "system_io", "", "") or +label_replace(rate(oracledb_wait_time_other[1m]), "data_type", "other", "", ""))','Wait','OracleDB','data_type',true,false,'oracledb wait time total','2020-01-29 11:03:20.000','2020-02-13 01:08:01.629'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES
('oracledb_activity_count','Oracledb activity count','oracledb activity count','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_execute_count [1m]), "data_type", "excutecount", "", "") or +label_replace(rate(oracledb_activity_parse_count_total[1m]), "data_type", "parse_count", "", "") )','Request','OracleDB','data_type',true,false,'oracledb activity count','2020-01-29 10:40:58.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_transaction','Oracledb transaction','oracledb transaction','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_user_rollbacks[1m]), "data_type", "rollbacks", "", "") or +label_replace(rate(oracledb_activity_user_commits[1m]), "data_type", "commits", "", ""))','Request','OracleDB','data_type',true,false,'oracledb transaction','2020-01-29 11:20:47.000','2020-02-13 01:26:28.558'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_cpu_usage','Redis cpu usage','redis cpu usage','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_used_cpu_sys [1m]), "data_type", "system", "", "") or +label_replace(rate(redis_used_cpu_user [1m]), "data_type", "user", "", "") )','CPU','Redis','data_type',true,false,'redis cpu usage','2020-01-29 01:56:58.000','2020-02-12 04:47:21.228'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_total_load','host total load','host total load','sum by (instance, data_type) ( +label_replace(node_load1 {{filter}}, "data_type", "load 1", "", "") or +label_replace(node_load5 
{{filter}}, "data_type", "load 5", "", "") or +label_replace(node_load15 {{filter}}, "data_type", "load15", "", "") )','CPU','Host',NULL,false,false,'host total load','2020-04-01 08:10:26.588','2020-04-03 01:23:47.665'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys_children','System CPU Used Background','System CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used Backedground:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_hits','Keyspace Hits','Number of successful lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_hits_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Hits:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_misses','Keyspace Misses','Number of failed lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_misses_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Misses:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys','DB Keys Count','Total number of keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB Keys Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_expired_keys','Expired Keys','Total number of key expiration events','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_expired_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Expired Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_evicted_keys','Evicted Keys','Number of evicted keys due to maxmemory limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_evicted_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Evicted Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys_expiring','DB Keys Count Expiring','Total number of expiring keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys_expiring)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB 
Keys Count Expiring:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_duration_seconds','Duration Seconds / Command','Total duration seconds per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_duration_seconds_total[1m]) * 1000)','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Duration Seconds:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-29 01:42:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_total','Redis memory total','redis memory total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(redis_allocator_active_bytes / 1048576, "data_type", "active", "" , "") or +label_replace(redis_memory_used_bytes / 1048576, "data_type", "used", "" , "") or +label_replace(redis_allocator_allocated_bytes / 1048576, "data_type", "allocated", "" , "") or +label_replace(redis_allocator_resident_bytes / 1048576, "data_type", "resident", "" , "") )','Memory','Redis','data_type',true,false,'redis memory total','2020-01-29 02:08:28.000','2020-02-13 00:45:28.475'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('count_by_connection_type','Count by connection type','count by connection type','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_connections_received_total [1m]), "data_type", "received connections", "", "") or +label_replace(rate(redis_rejected_connections_total [1m]), "data_type", "rejected 
connections", "", "") or +label_replace(redis_connected_clients, "data_type", "connected clients", "", "") or +label_replace(redis_blocked_clients, "data_type", "blocked clients", "", "") )','Connection','Redis','data_type',true,false,'count by connection type','2020-01-29 00:49:09.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_count','Number of row by stat','Number of row by stat','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_tup_deleted[1m]), "data_type", "deleted", "", "") or +label_replace(rate(pg_stat_database_tup_updated[1m]), "data_type", "updated", "", "") or +label_replace(rate(pg_stat_database_tup_inserted[1m]), "data_type", "inserted", "", "") or +label_replace(rate(pg_stat_database_tup_returned[1m]), "data_type", "returned", "", "") or +label_replace(rate(pg_stat_database_tup_fetched[1m]), "data_type", "fetched", "", "") )','Row','PostgreSQL','data_type',true,true,'Number of row by stat','2019-10-28 07:29:26.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_write_time','Read/Write spent time by file blocks','Read/Write spent time by file blocks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_blk_read_time [1m]), "data_type", "read", "", "") or +label_replace(rate(pg_stat_database_blk_write_time [1m]), "data_type", "write", "", ""))','Block','PostgreSQL','data_type',true,false,'Read/Write spent time by file blocks','2019-10-28 10:56:48.000','2020-02-13 01:06:46.680'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_resident_size','Resident Memory (MiB)','The total amount of resident memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_resident_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Resident Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_used_size','Used Memory (MiB)','Total number of bytes allocated by Redis using its allocator','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_memory_used_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Used Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_anormal_count','Number of anormal request','Number of anormal request ','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, anormal_type) +(label_replace(rate(cassandra_clientrequest_unavailables_count[1m]), "anormal_type", "unavailables", "", "") or +label_replace(rate(cassandra_clientrequest_timeouts_count[1m]), "anormal_type", "timeouts", "", "") or +label_replace(rate(cassandra_clientrequest_failures_count[1m]), "anormal_type", "failures", "", ""))','Request','Cassandra','anormal_type',true,false,'Number of anormal request ','2019-10-28 02:09:45.000','2020-02-13 01:16:24.862'); +INSERT INTO public.metric_meta2
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog','Commitlog count and size','Commitlog count and size','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(rate(cassandra_commitlog_completedtasks {{filter}}[1m]), "data_type", "log_count", "", "") or +label_replace(rate(cassandra_commitlog_totalcommitlogsize {{filter}}[1m]) / 1048576, "data_type", "log_size", "", ""))','Log','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-24 10:44:47.000','2020-02-13 01:16:24.864'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_threads_total','Number of Threads','Number of Threads','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_threads_running, "data_type", "active", "", "") or +label_replace(mysql_global_status_threads_connected, "data_type", "connected", "", "") or +label_replace(rate(mysql_global_status_connections [1m]), "data_type", "connection attempts[1m]", "", "") )','Thread','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-05 06:04:21.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_read_write_count','Local read write count','Local read write count','sum by(xm_clst_id, xm_namespace, xm_node_id, instance, type) +(label_replace( rate(cassandra_keyspace_readlatency_seconds_count [1m]), "type", "read", "", "") or +label_replace( 
rate(cassandra_keyspace_writelatency_seconds_count [1m]), "type", "write", "", ""))','Disk','Cassandra','type',true,true,'Local read write count','2019-10-24 05:18:50.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_lock_total','Oracledb lock total','oracledb lock total','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) +(oracledb_resource_current_utilization{resource_name =~''.+_locks''})','Resource','OracleDB','resource_name',true,false,'oracledb lock total','2020-01-29 11:17:01.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_per_sec_by_api','Service HTTP Requests Count by API (per Second)','the number of HTTP requests counts per second by API','(sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value)','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_core_count','Host CPU Core Count','Host_cpu_capacity_cores','count without(cpu, mode) (node_cpu_seconds_total{{filter}})','CPU','Host',NULL,true,false,'None','2020-03-23 
04:08:05.290','2020-03-23 04:08:05.290'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load5','Host CPU Load 5m Average','Host CPU 5m load average','node_load5{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 5m Load Average:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:11.655','2020-03-23 04:08:11.655'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_cluster','Pod Phase Count by Cluster','pod phase count by cluster','count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))','Cluster','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_network_io_byte','host network io byte','host network io byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )','Network','Host',NULL,false,false,'host network io byte','2020-03-24 05:48:31.359','2020-03-24 05:48:31.359'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_contextswitch_and_filedescriptor','host contextswitch and filedescriptor','host
contextswitch and filedescriptor','sum by (data_type, instance) ( +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "Context switch", "", "") or +label_replace(node_filefd_allocated {{filter}}, "data_type", "File descriptor", "", "") )','OS','Host',NULL,false,false,'host contextswitch and filedescriptor','2020-03-24 09:05:51.828','2020-03-24 09:08:06.867'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_usage','Host Swap Memory Usage (%)','Host Swap Memory Usage','node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}} / node_memory_SwapTotal_bytes{{filter}} +','Memory','Host',NULL,true,false,'None','2020-03-26 06:39:21.333','2020-03-26 06:39:21.333'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_boot_time','Host Boot time','Host Boot time','node_boot_time_seconds{{filter}}','CPU','Host',NULL,true,false,'None','2020-03-26 08:03:46.189','2020-03-26 08:03:46.189'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_latency','Host read Disk latency','Host disk read latency','sum by (instance) (rate(node_disk_reads_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_read_time_seconds_total{{filter}}[1m])/rate(node_disk_reads_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Read Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:34.001','2020-03-23 04:08:34.001'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('host_disk_write_latency','Host write Disk latency','Host disk write latency','sum by (instance) (rate(node_disk_writes_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_write_time_seconds_total{{filter}}[1m])/rate(node_disk_writes_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Write Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:35.823','2020-03-23 04:08:35.823'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_usage','Host Memory Usage (%)','Host Memory Usage ','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Usage:{{humanize $value}}%|{threshold}%.','2020-03-26 06:36:47.931','2020-03-26 06:36:47.931'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_total','Host Memory Total (GiB)','Memory information field MemTotal_bytes','node_memory_MemTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:16.897','2020-03-23 04:08:16.897'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_bytes_received_sent','Bytes Received & Sent in MySQL','Bytes Received & Sent in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( 
+label_replace(rate(mysql_global_status_bytes_received [1m]), "data_type", "received", "", "") or +label_replace(rate(mysql_global_status_bytes_sent [1m]), "data_type", "sent", "", ""))','Network','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}|{threshold}.','2019-12-05 07:58:11.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_95th','Service HTTP 95% Elapsed Time (ms)','the maximum time taken to servce the 95% of HTTP requests','histogram_quantile(0.95, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 95th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_99th','Service HTTP 99% Elapsed Time (ms)','the maximum time taken to servce the 99% of HTTP requests','histogram_quantile(0.99, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 99th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_error_rate','Service Pod HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Pod Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-11-07 07:52:24.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_90th','Service HTTP 90% Elapsed Time (ms)','the maximum time taken to servce the 90% of HTTP requests','histogram_quantile(0.90, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 90th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total_by_mountpoint','host filesystem size by mountpoint','host filesystem size by mountpoint','sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))','Filesystem','Host',NULL,false,false,'host filesystem size by mountpoint','2020-03-30 04:01:45.322','2020-03-30 05:16:32.252'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_timeline_count','Namespace timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id, xm_namespace, level)','Timeline','Namespace',NULL,false,false,'None','2020-04-08 06:21:21.392','2020-04-08 06:21:21.392'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_timeline_count','Cluster timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id,level)','Timeline','Cluster',NULL,false,false,'None','2020-04-08 06:19:32.792','2020-04-28 
08:07:47.786'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_transmit','Cluster Network Transmit','Cluster Network Transmit','sum by (xm_clst_id) (rate(node_network_transmit_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Transmit','2020-04-28 08:10:21.070','2020-04-28 08:29:18.491'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_receive','Cluster Network Receive','Cluster Network Receive','sum by (xm_clst_id) (rate(node_network_receive_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Receive','2020-04-28 08:07:26.294','2020-04-28 08:29:18.486'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_running_count','Namespace Pod Running Count','Running pod count by namespace','count by (xm_clst_id, xm_namespace) (sum by (xm_clst_id, xm_node_id, xm_namespace, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Namespace',NULL,false,false,'None','2020-05-21 01:18:06.016','2020-05-21 01:18:06.016'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_request','Pod CPU Request','Pod CPU Request','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_io_byte','Node Network IO byte','Node Network IO byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", "") )','Network','Node',NULL,false,false,'Node Network IO byte','2020-05-21 07:32:03.535','2020-05-21 07:32:03.535'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_request','pod_memory_request (Gib)','Total container memory request in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_node','Container memory sum by node','Container memory sum by node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity" , "", "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_working_set_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Node',NULL,false,false,'Container memory sum by node','2020-05-28 09:36:44.000','2020-06-09 01:38:10.694'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_context_switches','Node Context Switches','Node Context Switches','rate(node_context_switches_total {{filter}}[1m])','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:05.521'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_write_byte','Node disk read and write bytes','Node disk read and write bytes','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]), "data_type", "Read" , "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]), "data_type", "Write", "" , "") +)','Disk','Node',NULL,false,false,'Node disk read and write bytes','2020-05-28 13:02:44.729','2020-05-28 13:04:35.126'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_total','Host Swap Memory Total','Host Swap Total','node_memory_SwapTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Swap Memory Size:{{humanize 
$value}}GiB|{threshold}GiB.','2020-03-23 04:08:23.130','2020-03-23 04:08:23.130'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_iowait','Host CPU iowait','Host CPU iowait','avg by (instance) (rate(node_cpu_seconds_total{mode=''iowait'',{filter}}[1m])) * 100','CPU','Host',NULL,false,false,'Host:{{$labels.instance}} CPU IO wait:{{humanize $value}}|{threshold}.','2020-03-26 08:03:51.307','2020-03-26 08:03:51.307'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_filefd_allocated','Host statistics Filesystem allocated.','Host File descriptor statistics: allocated.','sum by (instance) (node_filefd_allocated{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem allocated:{{humanize $value}}|{threshold}.','2020-03-23 04:08:31.970','2020-03-23 04:08:31.970'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg','Service HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests','sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) == 0 or +sum (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) +/ sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace)','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Requests Time Avg:{{humanize 
$value}}ms|{threshold}ms.','2019-10-15 09:37:44.000','2020-03-09 06:42:14.172'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate_by_api','Service HTTP Requests Error Rate by API','the number of HTTP error counts by API / the number of HTTP requests counts by API','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) ==0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg_by_api','Service HTTP Average Elapsed Time by API (ms)','the average time taken to serve the HTTP requests by API for a service','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.500'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_used','Node CPU Used (Cores)','Node CPU Used (Cores)','(100 - (avg by (xm_clst_id, xm_node_id) (clamp_max(rate(node_cpu_seconds_total{name="node-exporter", mode="idle", xm_entity_type="Node", {filter}}[1m]),1.0)) * 100)) * sum by(xm_clst_id, xm_node_id)(imxc_kubernetes_node_resource_capacity_cpu{{filter}}) / 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:35.939'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_iowait','Node CPU I/O Wait','Node CPU I/O Wait','avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{name="node-exporter", mode="iowait", xm_entity_type="Node" , {filter}}[1m])) * 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:20.633'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_node','Container cpu sum by Node','Container cpu sum by Node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} * 0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001), "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001), "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Node',NULL,false,false,'Container cpu sum by Node','2020-05-28 08:06:35.736','2020-06-09 01:46:12.446'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops_per_device','Node Disk IOPs per device','Node Disk I/O Operations Per Second (per device)','sum by (xm_clst_id, xm_node_id, device) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node','device',false,false,'None','2020-06-10 05:56:05.311','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops','Node Disk IOPs','Node Disk I/O Operations Per Second','sum by (xm_clst_id, xm_node_id) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-06-10 05:54:01.309','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_iops','Host Disk IOPs','Host Disk IOPs','sum by (instance) ((rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or (rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))','Disk','Node',NULL,false,false,'Host Disk IOPs','2020-06-10 07:26:28.895','2020-06-10 07:26:28.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_limit','Pod CPU Limit','Pod CPU Limit','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_limit','pod_memory_limit (Gib)','Total container memory limit in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage_bytes','Container Memory Used (GiB)','Current memory usage in GiB, this includes all memory regardless of when it was accessed','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_used','Node Memory Used (GIB)','Node Memory Used (GIB)','((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Used:{{humanize $value}}GiB|{threshold}GiB.','2020-05-21 01:18:06.000','2020-06-04 11:11:11.000'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user','User CPU Used','User CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user[1m]))','CPU','Redis',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-05-29 09:37:22.273'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_container','Container cpu sum by container','container cpu sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_request_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{xm_cont_name!=''POD'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_pods','Container cpu sum by pod','Container cpu sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , 
""))','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pods','Container memory sum by pod','Container memory sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_container','Container memory sum by container','Container memory sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_limit_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{xm_cont_name!=''POD'',{filter}}, "data_type", "used", "" , ""))','Memory','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_disk_read_write_byte','Container disk read and write bytes','Container disk read and write bytes','sum by(xm_clst_id, xm_pod_id, xm_cont_name, data_type) 
(label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write", "" , ""))','Disk','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_disk_read_write_byte','Pod disk read and write bytes','Pod disk read and write bytes','sum by(xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write", "" , ""))','Disk','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_io_byte','Container Network IO byte','Container Network IO byte','sum by (xm_clst_id, xm_pod_id, xm_cont_name, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_io_byte','Pod Network IO byte','Pod Network IO byte','sum by (xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", 
"", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load1','Node CPU Load 1m Average','Node CPU 1m load average','node_load1{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 1m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:22:49.000','2019-05-15 08:22:49.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_open_file_descriptor','Node File Descriptor','Node File Descriptor','sum by(xm_clst_id, xm_node_id)(node_filefd_allocated {{filter}})','Filesystem','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} File Descriptor:{{humanize $value}}|{threshold}.','2020-05-21 01:18:06.000','2020-05-29 09:37:51.101'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_node_count','Node Type Sparselog Count','Node-type sparse log count by xm_clst_id, xm_node_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_node_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Node",{filter}}[1m])))','SparseLog','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_cache','Container Memory Cache (GiB)','Number of bytes of page cache memory / 
1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_cache{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load15','Host CPU Load 15m Average','Host CPU 15m load average','node_load15{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 15m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:13.337','2020-03-23 04:08:13.337'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes_device','Node Disk Write Bytes per Device (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency','Node Disk Write Latency (ms)','Node Disk Write Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Latency:{{humanize 
$value}}ms|{threshold}ms.','2019-05-20 11:00:56.000','2019-05-31 17:47:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_writes_count_device','Node Disk Writes Count per Device (IOPS)','Node Disk Writes Counts per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_writes_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Writes Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_throttled_rate','Container CPU Throttled Rate','container throttled rate','sum by(xm_clst_id, xm_cont_id) (rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="",{filter}}[1m]))','Cluster','Container',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_total_count','Node Pod Total Count','Node Pod Total Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_service_http_requests_per_sec','Service HTTP Requests Count (per Second)','the number of HTTP requests counts per second','((sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))/ on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Http Requests/Second:{{humanize $value}}|{threshold}.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_per_sec','Service Pod HTTP Requests Count (per Second)','the number of HTTP requets counts per second for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod Http Requests/Seconds:{{humanize $value}}|{threshold}.','2019-11-07 07:51:11.000','2020-03-09 06:34:19.353'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_max_usage_bytes','Container Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_max_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 
1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_receive','Container Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_50th','Service HTTP 50% Elapsed Time (ms)','the maximum time taken to servce the 50% of HTTP requests','histogram_quantile(0.50, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 50th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_errors_count','Service Error Count','service error 
count','sum by(xm_clst_id, xm_namespace, xm_service_name, statuscode ) (imxc_service_errors_count{statuscode!="200",{filter}}) OR on() vector(0)','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Error Count:{{humanize $value}}|{threshold}.','2020-08-21 16:45:00.000','2020-08-21 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_used','Host Memory Used (GiB)','Memory information field MemUsed_bytes','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:21.399','2020-03-23 04:08:21.399'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_all_state','Workload Count All State','workload total count regardless of pod state','count by(xm_clst_id, controller_kind) (imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_running_pod','Workload Count Running Pod','workload count of Running state pod','sum by(xm_clst_id,controller_kind ) 
(imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit_device','Node Network Transmit per Device(KiB)','Network device statistic transmit_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive_device','Node Network Receive per Device(KiB)','Network device statistic receive_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_time_avg','Service Pod HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests for pod','sum by 
(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod http Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2019-11-07 07:51:46.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_system','Container CPU System (%)','Container CPU Usage (System)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_usage','Container CPU Usage (%)','Container CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-05-15 
01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_namespace','Pod Phase Count by Namespace','pod phase count by cluster, namespace','count by(xm_clst_id, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','Namespace','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} Pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_limit_bytes','Container Filesystem Limit Bytes (GiB)','Number of bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage','Container Memory Usage (%)','Container memory usage compared to limit if limit is non-zero or 1GiB if limit is zero','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'', xm_cont_name!=''POD'', {filter}} / (container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} > 0) * 100) or sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) 
(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024 / 1024 / 1024 * 100)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_swap','Container Memory Swap (GiB)','Container swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_swap{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_transmit','Container Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('controller_pod_count','Controller Pod Count','Controller Pod Count','sum (imxc_kubernetes_controller_counts{{filter}}) by (xm_clst_id, xm_namespace, xm_entity_name, 
xm_entity_type)','Pod','Controller',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Controller Pod Counts:{{humanize $value}}|{threshold}.','2019-10-10 06:39:09.000','2019-10-10 06:39:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load1','Host CPU Load 1m Average','Host CPU 1m load average','node_load1{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 1m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:09.946','2020-03-23 04:08:09.946'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_usage','Host CPU Usage (%)','Host CPU Usage','100 - (avg by (instance)(clamp_max(rate(node_cpu_seconds_total{mode=''idle'',{filter}}[1m]),1.0)) * 100)','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:07.606','2020-03-23 04:08:07.606'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_cpuutilization','The percentage of allocated EC2 compute','The percentage of allocated EC2 compute units that are currently in use on the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_cpuutilization_average{{filter}})','CPU','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} CPU Utillization:{{humanize $value}}%|{threshold}%','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections','Number of Incoming Connections','The number of incoming 
connections from clients to the database server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (mongodb_connections{{filter}})','Connection','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Incoming Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-13 02:26:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_buffer_io','Block read / write','mysql buffer I/O summary','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_innodb_buffer_pool_write_requests, "data_type", "write", "", "") or +label_replace(mysql_global_status_innodb_buffer_pool_read_requests, "data_type", "read", "", "") )','Block','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Mysql Buffer IO:{{humanize $value}}|{threshold}.','2019-12-05 07:30:33.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_reads','Number of Reads Directly from Disk','The number of logical reads that InnoDB could not satisfy from the buffer pool, and had to read directly from disk','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_reads[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Reads Directly from Disk Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('mysql_global_status_connections','Number of Connection Attempts','The number of connection attempts (successful or not) to the MySQL server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_connections[1m]))','Connection','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Connection Attempts counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_status_locks','Number of Locks in MySQL','Number of Locks in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_row_lock_current_waits[1m]), "data_type", "rowlocks", "", "") or +label_replace(rate(mysql_global_status_innodb_row_lock_waits[1m]), "data_type", "waits for rowlocks", "", "") or +label_replace(rate(mysql_global_status_table_locks_immediate[1m]), "data_type", "tablelock immediate", "", "") or +label_replace(rate(mysql_global_status_table_locks_waited[1m]), "data_type", "tablelock waited", "", "") )','Lock','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Mysql Status Locks:{{humanize $value}}|{threshold}.','2019-12-05 08:39:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage_bytes','Container Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_writes','Container Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_sessions_value','Session Count','Gauge metric with count of sessions by status and type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, status_type) +(label_join(oracledb_sessions_value, "status_type", "-", "status", "type"))','Session','OracleDB','status_type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Session Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_bytes','Bytes Written to Temporary Files (KiB)','Total amount of data written to temporary files by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_bytes[1m])) / 1024','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File 
Write Size:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys','System CPU Used','System CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user_children','User CPU Used Background','User CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate','Service HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts','sum by(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / sum by +(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) 
(rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_cache_hit_ratio','Buffer Cache Hit Ratio (%)','(Number of Logical Read - Number of Reads Directly from Disk) / (Number of Logical Read) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ((increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) - increase(mysql_global_status_innodb_buffer_pool_reads[1m])) / increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) * 100)','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Buffer Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage','Pod Filesystem Usage (%)','Pod File System Usage: 100 * (Used Bytes / Limit Bytes)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} /((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_pod_cpu_request','Node Pod CPU Request','Node Pod CPU Request','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} Pod CPU Requests:{{humanize $value}}|{threshold}.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_cpu_usage','Node Pod CPU Usage (%)','Node Pod CPU Usage','sum by (xm_clst_id,xm_node_id) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod CPU Usage:{{humanize $value}}%|{threshold}%.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_usage_core','Container CPU Usage (Core)','Container CPU Usage (Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_system_core','Container CPU System (Core)','Container CPU Usage (System)(Core)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_user_core','Container CPU User (Core)','Container CPU Usage 
(User)(Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_service','pod info in service','pod info(state, node) in service','sum by (xm_clst_id, xm_namespace, xm_service_name,xm_node_id,node_status,xm_pod_id,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2020-12-22 16:05:00.000','2020-12-22 16:05:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_state','Service State Count Sum','service state sum by xm_service_name','sum by (xm_service_name,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2021-01-06 17:30:00.000','2021-01-06 17:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_workload_state','Workload State Count Sum','wokload state sum by owner_name','count by (owner_name, pod_state) (imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_workload','Pod info by workload type','pod info(state, node) by workload type (do filter param)','count by (xm_clst_id, xm_namespace, owner_name, xm_node_id, node_status, xm_pod_id, pod_state) 
(imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_up_state','Node State metric','Node State metric for up, down check','imxc_kubernetes_node_ready{{filter}}','State','Node',NULL,true,false,'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Down {threshold}.','2020-02-02 14:30:00.000','2020-02-02 14:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} 
PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_reads_by_workload', 
'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, 
xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100))', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, 
xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name,xm_entity_type) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by (xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 
'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by(xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +--Number of Pods not running +INSERT INTO public.metric_meta2 VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()); +--Number of Containers not running +INSERT INTO public.metric_meta2 VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", 
{filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()); +-- Containers Restart count +INSERT INTO public.metric_meta2 VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()); + +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_per_sec','Service Transaction Count (per Second)','Service Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Transaction Count (per Second)','2021-11-15 16:11:19.606','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_elapsed_time_avg','Service Pod Transaction Elapsed Time (avg)','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Average Elapsed Time','2021-11-15 16:09:34.233','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_jspd_txn_error_rate','Service Transaction Error Rate','Service Transaction Error Rate','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2022-02-15 14:33:00.118000','2022-02-15 15:40:17.640000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_per_sec','Service Pod Transaction Count (per sec)','The number of transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-02-15 17:59:39.450000','2022-02-15 17:59:39.450000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_elapsed_time_avg','Service Average Elapsed Time','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) 
((increase(imxc_txn_total_count{{filter}}[1m])))','Request','Service',null,true,true,'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2021-11-15 16:09:34.233000','2021-11-15 16:12:21.335000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_error_count','Service Transaction Error Count','Service Transaction Error Count','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])))','Request','Service',NULL,true,true,'Service Transaction Error Count','2021-11-15 16:10:31.352','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_error_rate','Service Pod Transaction Error Rate','The number of transaction error rate for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.','2022-02-15 18:08:58.180000','2022-02-15 18:08:58.180000'); + +INSERT INTO 
metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_active_txn_per_sec','Service Active Transaction Count (per Second)','Service Active Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:51:45.946','2022-03-11 15:51:45.946'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_active_txn_per_sec','Service Pod Active Transaction Count (per sec)','The number of active transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:53:29.252','2022-03-11 15:53:29.252'); + + +INSERT INTO public.license_key (id, license_key, set_time, in_used, tenant_id) VALUES (nextval('hibernate_sequence'), 
'A46CB0A0870B60DD0EF554F092FB8490C647C4ACCF17177EB0028FEF1B677A1DC86C08219D3D357E55E87B653A9D2F044F9095576ED493CE5D1E180E8843A04BCFE94E500F85491D408CFC7397B82F00063415F4CF8756545B6ED1A38F07F91A7B6D9381B7FC433A5086CDD2D748527ECB42835677199F23F7C8E33A66E8138182DDD76BE4925FA4B1DFD96FD5578FE80C75E0E20D76877BF6FD570265D8E69CAC34795B982CF8D811669894886567E4F5F62E28990953401374B548787E35374BFF201D5C9AD062B326E72F9B1D7791A610DA1BDF1D4F829819BC537E06C8D54F95FB04F2DAC456698F605DE3BBD72E472FC79658C806B188988B053E1E4D96FFFFFF0312983D630FAD5E9160650653074248047030124045265319328119048121312221292096178141356403289033057286071001044254168244430392446457353385472238471183338511051434316333006127241420429465082200161165099271484261287306170426201314452131350327249112310323036187433166345114324280269098441154231174135226128298344425341164290424093450115453299282209144110060155055496368233391148510223372355438125122460232315097083390283180026090507303464176016343147301028053052418046214169100404193398101492126437150008449359062078276386196105011194373118107003376243188284337378334352432479501211364186021040035210237120336302073022394079272002081397132067383497202300181309396185361017436058208454167203412219275329234043427354024133409339470296204490485256467335056F5B2CABD122B376DAEA67944E1CCE6867DF9EB6504C78F817DF9EB6504C78F81BF1E615E6EC6242C9667BD675FC5FA39C6672FE2068E5D1431C6CD04429D07655865E293C1F77ED7A0D33F5556DA6CD3A8EC2774DB04F797CE4A29B0312F75E585D51D7B4DD227EA6BD5278CB9233040E7DD2B30A6D5119959D5B7EAC826D3DA0537EFB5A034A6A1C91A619F4E168F46A455B594C91F058E1E22C7EA2957EED7533D069C335C95B4FA2B53E71A800343EA7F16B05AFBA04635F1FBDE9C81709C27BA075C78FA26311ED3A4A5226EF47FC84C3024999406B47F2098B5983CC3CAF79F92332074B9872E429CBE8EF12D5092628E4D4A39CBDDFCAAB2E382229CF09A5B10243340C1A7A0C5CBC14C704FCE873571524A5B038F1781CD31A4D8E2C48E02E63A2746E668273BE9D63937B88D8C864CE439528EB13BDFAC3E52EE4B8CB75B4ED65A7C97B42E5DAEE3E41D2331B06FFFBA71BECD9B96AEEB969670FC3869CC59050FD6DFA3245719531410402
2250232266247291151DEFAULT_TENANT', now(), true, 'DEFAULT_TENANT'); +insert into public.license_key2 (id, license_key, set_time, cluster_id, license_used) values (nextval('hibernate_sequence'), 'D041F44269EAFF1AF7C37ACAA86B7D9CBED89547431E777B797220CF62FE5D6A27C66BEBEAB8F4C89EA5379009C90CDEBFFAE307B7AEB897DC4D8CEAB61654340BB746B0B46679A9FB4791C777BAEBA176308F6BEB1654CE43D4E80E6D0F80CEC00B1EC30E7DA4BB8D3159133EF98AEB50617107DB77BE94676E0D4AA04ADA3B11A66824DB89A60C52BC1AB92926F10189DBBA6210B31478F48CF87B5D754F1A7C6BED0D1637742179DBF7BE82B3B3357AEA82CFAAD9126E39C4E19BABCB1CBDDB816C86A8F7C476D963265720383B627800775B0C9116D67CE5CB7CFC71D0A8A36623965EBB18A5BE1816FB1FAAAEAC361D2ABBC7344EC0B6C61E0395115B13FFFFFF03DEF34E840F2ED2AC84AC44DF368362366124308470063002498494067338303241077065122260378200508377102354337080160182150254091118451110391059070094162363290186239455351194330333503046082379128006166220287276298120398066372099177432015458270176242025196335311342039022343475412085392206244005184417460227292375103433217376511140361223163316121467443014486278407389237024349111268136424371062035285300509195050441367478101310353464249250399393211468032382017479033204215420319027225173414447170427346074048078201158299332476339297492269181214328291096331271222221199421106169418137405411466364104047152090465446480302462385088114481261428257207129020358100073347153355274495263056109229159157348228275180360410147142130230179450079472482323145202198010119F9BFDDF3C203A7E537AB046811BB7CEA37AB046811BB7CEA37AB046811BB7CEAE012403885A8163C0E3E14D7AD6207B5E8CE91579501D84B09D6682339A4DB462F479FFE1B232AFB3D19E925768AF0AA3E62D9AB6F9CEADDB1CDCA351CAA90996631814A556C47270431A6A40891F756FDDCA7BDD05C62A2932F8E77979E0D43C9F12565B1F4BB4F0520B44CC76BAC23F65330AC5966D22B209F32126132F4848E500A013F4DC32306A9620394D40C94B8EBC2406B68EBE31DAB17EF2DF977731A5C41C11311DC36E1FB8BC2529D1AA20D5D46919472212D781B1D77378872CBD14C2A5B783C7ADF0D2680946C52E56E186A7E971E7EAB2CF09511361DD892B5D4A113E8A2C60E3F7FEFA4100753D
82B7064101002937733CE0285C73130635F0CBBDF6F1160C2917B2DF9B1C391A8E9D7D9F380BF31A77A84017D0DF26B35BED6B2D145A051EB4345DA90241CA997828B8393ACD5C7316594634356CCC3986EFDD7776AC62C65E500ED125097142489479219130046503035CloudMOA', now(), null, true); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); + +INSERT INTO public.report_template(id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) VALUES(nextval('hibernate_sequence'), 'admin', '2020-04-28 09:29:49.466', 'admin', '2020-04-28 09:29:49.466', '0 0 1 ? * * *', true, +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network 
Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s Cluster resource usage is displayed.

1. CPU Usage

${metricItem1587977724113}

2. Memory Usage

${metricItem1588037028605}

3. Network

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod


1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}





', 'cloudmoa Cluster Daily Report'); +INSERT INTO public.report_template (id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) +VALUES(nextval('hibernate_sequence'), 'admin', '2020-01-20 01:17:50.182', 'admin', '2020-04-29 08:01:40.841', '0 0 9 ? * * *', false, +'[{"id":"metricItem1579497906163","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_cpu_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1579497916213","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_memory_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Memory Usage (%)","displayType":"bar","unit":"%","data":""},{"id":"metricItem1579497928963","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_network_receive","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Network Receive (KiB)","displayType":"pie","unit":"%","data":""},{"id":"metricItem1579497947243","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_load5","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Load 5m Average","displayType":"table","unit":"%","data":""}]', +'

1. editor usage

Let''s write the editor.

1.1 Text Decoration

Bold
Itelic
Strike


1.2 Color and blockquote

What''s your color?

Today is the first day of the rest of your life

1.3 List

  • Apple
  • Banana

  1. postgre
  2. cassandra
  3. prometheus

[ TODO List ]
  • Create DB table
  • Charge file name

1.4 Link, Table, Image




Deamonset NameAgeNamespaceLabelsImageCPUMemory
imxc-agent5
day
imxcimxc-agentregistry.openstacklocal:5000/imxc/imxc-agent:latest83.151.68
GiB
kube-flannel-ds-amd643
month
kube-systemflannelnodequay.io/coreos/flannel:v0.11.0-amd641.0790.88
MiB
kube-proxy10
month
kube-systemkube-proxyk8s.gcr.io/kube-proxy:v1.16.01.18117.66
MiB
node-exporter10
month
defaultnode-exporternode-exporterprom/node-exporter4.7697.54
MiB

exem.jpg

1.6 Metric Item

${metricItem1579497906163}
${metricItem1579497916213}
${metricItem1579497928963}
${metricItem1579497947243}



















', 'Editor usage example'); + +INSERT INTO public.report_static(id, created_by, created_date, modified_by, modified_date, cron_exp, metric_data, template_data, title, "type", report_template_id) VALUES(10582051, 'admin', '2020-04-29 08:27:52.545', 'admin', '2020-04-29 08:27:52.545', '0 0 1 ? * * *', +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s cluster resource usage flow is shown.

1. CPU Usage

Abnormally high CPU usage by particular programs can be an indication that there is something wrong with the computer system.

${metricItem1587977724113}

2. Memory Usage

The Memory Usage window displays the amount of memory available on your system, as well as the memory currently in use by all applications, including Windows itself.

${metricItem1588037028605}

3. Network

A network transmit/receive provides basic network utilization data in relation to the available network capacity.

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod

1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}







', +'cloudmoa Cluster Daily Report', 'manual', (select id from report_template where title='cloudmoa Cluster Daily Report')); + +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency 
(ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', (select id from auth_resource2 where name='CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service 
TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', (select id from auth_resource2 where name='Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', 
'[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', +(select id from auth_resource3 where 
name='dashboard|admin|CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', +(select id from auth_resource3 where name='dashboard|admin|Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, 
code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, 
code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search amomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 
0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'cloudmoa-trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'cmoa-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream 
Txntrend', 'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +insert into public.log_management (cluster_id, node_id, log_rotate_dir, log_rotate_count, log_rotate_size, log_rotate_management, back_up_dir, back_up_period, back_up_dir_size, back_up_management, created_date, modified_date) values ('cloudmoa', '', '/var/lib/docker', 3, 100, true, '/home/moa/log', 5, 1000, true, '2020-07-30 13:54:52', null); + +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (5, 'metrics-server', 'agent', 'Metrcis-Server는 Kubernetes의 kubelet에 있는 cAdvisor로부터 Container Metric 데이터를 수집하여 Prometheus에 전달하는 역할을 합니다.', null, '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: 
["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + 
serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp', true, '2021-03-11 13:41:48.000000', '2021-03-11 13:41:56.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. +', null, '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: 
cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', null, '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: 
+ cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', null, '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - 
extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + 
containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.16', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + 
metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: 
; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (3, 'prometheus', 
'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.15', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: 
''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: 
__meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + 
restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: 
${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); + + +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"', true); + + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by TRX_NAME_TYPE (parameter(1), 
param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the 
option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop 
values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission 
cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); diff --git a/roles/cmoa_demo_install/files/04-keycloak/Chart.yaml b/roles/cmoa_demo_install/files/04-keycloak/Chart.yaml new file mode 100644 index 0000000..a5d4032 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 4.0.0 +description: Modified Authentication Module By EXEM CloudMOA +home: https://www.keycloak.org/ +icon: https://www.keycloak.org/resources/images/keycloak_logo_480x108.png +keywords: +- sso +- idm +- openid connect +- saml +- kerberos +- ldap +maintainers: +- email: unguiculus@gmail.com + name: unguiculus +- email: thomas.darimont+github@gmail.com + name: thomasdarimont +name: keycloak +sources: +- https://github.com/codecentric/helm-charts +- https://github.com/jboss-dockerfiles/keycloak +- https://github.com/bitnami/charts/tree/master/bitnami/postgresql +version: 11.0.1 diff --git a/roles/cmoa_demo_install/files/04-keycloak/OWNERS b/roles/cmoa_demo_install/files/04-keycloak/OWNERS new file mode 100644 index 0000000..8c2ff0d --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/OWNERS @@ -0,0 +1,6 @@ +approvers: + - unguiculus + - thomasdarimont +reviewers: + - unguiculus + - thomasdarimont diff --git 
a/roles/cmoa_demo_install/files/04-keycloak/README.md b/roles/cmoa_demo_install/files/04-keycloak/README.md new file mode 100644 index 0000000..5f8da10 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/README.md @@ -0,0 +1,765 @@ +# Keycloak + +[Keycloak](http://www.keycloak.org/) is an open source identity and access management for modern applications and services. + +## TL;DR; + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Introduction + +This chart bootstraps a [Keycloak](http://www.keycloak.org/) StatefulSet on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +It provisions a fully featured Keycloak installation. +For more information on Keycloak and its capabilities, see its [documentation](http://www.keycloak.org/documentation.html). + +## Prerequisites Details + +The chart has an optional dependency on the [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart. +By default, the PostgreSQL chart requires PV support on underlying infrastructure (may be disabled). + +## Installing the Chart + +To install the chart with the release name `keycloak`: + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Uninstalling the Chart + +To uninstall the `keycloak` deployment: + +```console +$ helm uninstall keycloak +``` + +## Configuration + +The following table lists the configurable parameters of the Keycloak chart and their default values. 
+ +| Parameter | Description | Default | +|---|---|---| +| `fullnameOverride` | Optionally override the fully qualified name | `""` | +| `nameOverride` | Optionally override the name | `""` | +| `replicas` | The number of replicas to create | `1` | +| `image.repository` | The Keycloak image repository | `docker.io/jboss/keycloak` | +| `image.tag` | Overrides the Keycloak image tag whose default is the chart version | `""` | +| `image.pullPolicy` | The Keycloak image pull policy | `IfNotPresent` | +| `imagePullSecrets` | Image pull secrets for the Pod | `[]` | +| `hostAliases` | Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files | `[]` | +| `enableServiceLinks` | Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links | `true` | +| `podManagementPolicy` | Pod management policy. One of `Parallel` or `OrderedReady` | `Parallel` | +| `restartPolicy` | Pod restart policy. One of `Always`, `OnFailure`, or `Never` | `Always` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | `""` | +| `serviceAccount.annotations` | Additional annotations for the ServiceAccount | `{}` | +| `serviceAccount.labels` | Additional labels for the ServiceAccount | `{}` | +| `serviceAccount.imagePullSecrets` | Image pull secrets that are attached to the ServiceAccount | `[]` | +| `rbac.create` | Specifies whether RBAC resources are to be created | `false` +| `rbac.rules` | Custom RBAC rules, e. g. for KUBE_PING | `[]` +| `podSecurityContext` | SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. 
This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) | `{"fsGroup":1000}` | +| `securityContext` | SecurityContext for the Keycloak container | `{"runAsNonRoot":true,"runAsUser":1000}` | +| `extraInitContainers` | Additional init containers, e. g. for providing custom themes | `[]` | +| `extraContainers` | Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy | `[]` | +| `lifecycleHooks` | Lifecycle hooks for the Keycloak container | `{}` | +| `terminationGracePeriodSeconds` | Termination grace period in seconds for Keycloak shutdown. Clusters with a large cache might need to extend this to give Infinispan more time to rebalance | `60` | +| `clusterDomain` | The internal Kubernetes cluster domain | `cluster.local` | +| `command` | Overrides the default entrypoint of the Keycloak container | `[]` | +| `args` | Overrides the default args for the Keycloak container | `[]` | +| `extraEnv` | Additional environment variables for Keycloak | `""` | +| `extraEnvFrom` | Additional environment variables for Keycloak mapped from a Secret or ConfigMap | `""` | +| `priorityClassName` | Pod priority class name | `""` | +| `affinity` | Pod affinity | Hard node and soft zone anti-affinity | +| `nodeSelector` | Node labels for Pod assignment | `{}` | +| `tolerations` | Node taints to tolerate | `[]` | +| `podLabels` | Additional Pod labels | `{}` | +| `podAnnotations` | Additional Pod annotations | `{}` | +| `livenessProbe` | Liveness probe configuration | `{"httpGet":{"path":"/health/live","port":"http"},"initialDelaySeconds":300,"timeoutSeconds":5}` | +| `readinessProbe` | Readiness probe configuration | `{"httpGet":{"path":"/auth/realms/master","port":"http"},"initialDelaySeconds":30,"timeoutSeconds":1}` | +| `resources` | Pod resource requests and limits | `{}` | +| `startupScripts` | Startup scripts to run before Keycloak starts 
up | `{"keycloak.cli":"{{- .Files.Get "scripts/keycloak.cli" \| nindent 2 }}"}` | +| `extraVolumes` | Add additional volumes, e. g. for custom themes | `""` | +| `extraVolumeMounts` | Add additional volumes mounts, e. g. for custom themes | `""` | +| `extraPorts` | Add additional ports, e. g. for admin console or exposing JGroups ports | `[]` | +| `podDisruptionBudget` | Pod disruption budget | `{}` | +| `statefulsetAnnotations` | Annotations for the StatefulSet | `{}` | +| `statefulsetLabels` | Additional labels for the StatefulSet | `{}` | +| `secrets` | Configuration for secrets that should be created | `{}` | +| `service.annotations` | Annotations for headless and HTTP Services | `{}` | +| `service.labels` | Additional labels for headless and HTTP Services | `{}` | +| `service.type` | The Service type | `ClusterIP` | +| `service.loadBalancerIP` | Optional IP for the load balancer. Used for services of type LoadBalancer only | `""` | +| `loadBalancerSourceRanges` | Optional List of allowed source ranges (CIDRs). Used for service of type LoadBalancer only | `[]` | +| `service.httpPort` | The http Service port | `80` | +| `service.httpNodePort` | The HTTP Service node port if type is NodePort | `""` | +| `service.httpsPort` | The HTTPS Service port | `8443` | +| `service.httpsNodePort` | The HTTPS Service node port if type is NodePort | `""` | +| `service.httpManagementPort` | The WildFly management Service port | `8443` | +| `service.httpManagementNodePort` | The WildFly management node port if type is NodePort | `""` | +| `service.extraPorts` | Additional Service ports, e. g. for custom admin console | `[]` | +| `service.sessionAffinity` | sessionAffinity for Service, e. g. 
"ClientIP" | `""` | +| `service.sessionAffinityConfig` | sessionAffinityConfig for Service | `{}` | +| `ingress.enabled` | If `true`, an Ingress is created | `false` | +| `ingress.rules` | List of Ingress rules | see below | +| `ingress.rules[0].host` | Host for the Ingress rule | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.rules[0].paths` | Paths for the Ingress rule | `[/]` | +| `ingress.servicePort` | The Service port targeted by the Ingress | `http` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Additional Ingress labels | `{}` | +| `ingress.tls` | TLS configuration | see below | +| `ingress.tls[0].hosts` | List of TLS hosts | `[keycloak.example.com]` | +| `ingress.tls[0].secretName` | Name of the TLS secret | `""` | +| `ingress.console.enabled` | If `true`, an Ingress for the console is created | `false` | +| `ingress.console.rules` | List of Ingress rules for the console | see below | +| `ingress.console.rules[0].host` | Host for the Ingress rule for the console | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.console.rules[0].paths` | Paths for the Ingress rule for the console | `[/auth/admin]` | +| `ingress.console.annotations` | Ingress annotations for the console | `{}` | +| `networkPolicy.enabled` | If true, the ingress network policy is deployed | `false` +| `networkPolicy.extraFrom` | Allows to define allowed external traffic (see Kubernetes doc for network policy `from` format) | `[]` +| `route.enabled` | If `true`, an OpenShift Route is created | `false` | +| `route.path` | Path for the Route | `/` | +| `route.annotations` | Route annotations | `{}` | +| `route.labels` | Additional Route labels | `{}` | +| `route.host` | Host name for the Route | `""` | +| `route.tls.enabled` | If `true`, TLS is enabled for the Route | `true` | +| `route.tls.insecureEdgeTerminationPolicy` | Insecure edge termination policy of the Route. 
Can be `None`, `Redirect`, or `Allow` | `Redirect` | +| `route.tls.termination` | TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` | `edge` | +| `pgchecker.image.repository` | Docker image used to check Postgresql readiness at startup | `docker.io/busybox` | +| `pgchecker.image.tag` | Image tag for the pgchecker image | `1.32` | +| `pgchecker.image.pullPolicy` | Image pull policy for the pgchecker image | `IfNotPresent` | +| `pgchecker.securityContext` | SecurityContext for the pgchecker container | `{"allowPrivilegeEscalation":false,"runAsGroup":1000,"runAsNonRoot":true,"runAsUser":1000}` | +| `pgchecker.resources` | Resource requests and limits for the pgchecker container | `{"limits":{"cpu":"10m","memory":"16Mi"},"requests":{"cpu":"10m","memory":"16Mi"}}` | +| `postgresql.enabled` | If `true`, the Postgresql dependency is enabled | `true` | +| `postgresql.postgresqlUsername` | PostgreSQL User to create | `keycloak` | +| `postgresql.postgresqlPassword` | PostgreSQL Password for the new user | `keycloak` | +| `postgresql.postgresqlDatabase` | PostgreSQL Database to create | `keycloak` | +| `serviceMonitor.enabled` | If `true`, a ServiceMonitor resource for the prometheus-operator is created | `false` | +| `serviceMonitor.namespace` | Optionally sets a target namespace in which to deploy the ServiceMonitor resource | `""` | +| `serviceMonitor.namespaceSelector` | Optionally sets a namespace selector for the ServiceMonitor | `{}` | +| `serviceMonitor.annotations` | Annotations for the ServiceMonitor | `{}` | +| `serviceMonitor.labels` | Additional labels for the ServiceMonitor | `{}` | +| `serviceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `serviceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `serviceMonitor.path` | The path at which metrics are served | `/metrics` | +| `serviceMonitor.port` | The Service port at which metrics are served | `http` | +| `extraServiceMonitor.enabled` | If `true`, 
an additional ServiceMonitor resource for the prometheus-operator is created. Could be used for additional metrics via [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) | `false` | +| `extraServiceMonitor.namespace` | Optionally sets a target namespace in which to deploy the additional ServiceMonitor resource | `""` | +| `extraServiceMonitor.namespaceSelector` | Optionally sets a namespace selector for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.annotations` | Annotations for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.labels` | Additional labels for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `extraServiceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `extraServiceMonitor.path` | The path at which metrics are served | `/metrics` | +| `extraServiceMonitor.port` | The Service port at which metrics are served | `http` | +| `prometheusRule.enabled` | If `true`, a PrometheusRule resource for the prometheus-operator is created | `false` | +| `prometheusRule.annotations` | Annotations for the PrometheusRule | `{}` | +| `prometheusRule.labels` | Additional labels for the PrometheusRule | `{}` | +| `prometheusRule.rules` | List of rules for Prometheus | `[]` | +| `autoscaling.enabled` | Enable creation of a HorizontalPodAutoscaler resource | `false` | +| `autoscaling.labels` | Additional labels for the HorizontalPodAutoscaler resource | `{}` | +| `autoscaling.minReplicas` | The minimum number of Pods when autoscaling is enabled | `3` | +| `autoscaling.maxReplicas` | The maximum number of Pods when autoscaling is enabled | `10` | +| `autoscaling.metrics` | The metrics configuration for the HorizontalPodAutoscaler | `[{"resource":{"name":"cpu","target":{"averageUtilization":80,"type":"Utilization"}},"type":"Resource"}]` | +| `autoscaling.behavior` | The scaling policy configuration for the HorizontalPodAutoscaler | 
`{"scaleDown":{"policies":[{"periodSeconds":300,"type":"Pods","value":1}],"stabilizationWindowSeconds":300}` | +| `test.enabled` | If `true`, test resources are created | `false` | +| `test.image.repository` | The image for the test Pod | `docker.io/unguiculus/docker-python3-phantomjs-selenium` | +| `test.image.tag` | The tag for the test Pod image | `v1` | +| `test.image.pullPolicy` | The image pull policy for the test Pod image | `IfNotPresent` | +| `test.podSecurityContext` | SecurityContext for the entire test Pod | `{"fsGroup":1000}` | +| `test.securityContext` | SecurityContext for the test container | `{"runAsNonRoot":true,"runAsUser":1000}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --set replicas=1 +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --values values.yaml +``` + +The chart offers great flexibility. +It can be configured to work with the official Keycloak Docker image but any custom image can be used as well. + +For the official Docker image, please check its configuration at https://github.com/keycloak/keycloak-containers/tree/master/server. + +### Usage of the `tpl` Function + +The `tpl` function allows us to pass string values from `values.yaml` through the templating engine. +It is used for the following values: + +* `extraInitContainers` +* `extraContainers` +* `extraEnv` +* `extraEnvFrom` +* `affinity` +* `extraVolumeMounts` +* `extraVolumes` +* `livenessProbe` +* `readinessProbe` + +Additionally, custom labels and annotations can be set on various resources, the values of which are passed through `tpl` as well. + +It is important that these values be configured as strings. +Otherwise, installation will fail. 
+See example for Google Cloud Proxy or default affinity configuration in `values.yaml`. + +### JVM Settings + +Keycloak sets the following system properties by default: +`-Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS -Djava.awt.headless=true` + +You can override these by setting the `JAVA_OPTS` environment variable. +Make sure you configure container support. +This allows you to only configure memory using Kubernetes resources and the JVM will automatically adapt. + +```yaml +extraEnv: | + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true +``` + +### Database Setup + +By default, Bitnami's [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart is deployed and used as database. +Please refer to this chart for additional PostgreSQL configuration options. + +#### Using an External Database + +The Keycloak Docker image supports various database types. +Configuration happens in a generic manner. + +##### Using a Secret Managed by the Chart + +The following examples uses a PostgreSQL database with a secret that is managed by the Helm chart. + +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + +extraEnvFrom: | + - secretRef: + name: '{{ include "keycloak.fullname" . }}-db' + +secrets: + db: + stringData: + DB_USER: '{{ .Values.dbUser }}' + DB_PASSWORD: '{{ .Values.dbPassword }}' +``` + +`dbUser` and `dbPassword` are custom values you'd then specify on the commandline using `--set-string`. + +##### Using an Existing Secret + +The following examples uses a PostgreSQL database with a secret. +Username and password are mounted as files. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + - name: DB_USER_FILE + value: /secrets/db-creds/user + - name: DB_PASSWORD_FILE + value: /secrets/db-creds/password + +extraVolumeMounts: | + - name: db-creds + mountPath: /secrets/db-creds + readOnly: true + +extraVolumes: | + - name: db-creds + secret: + secretName: keycloak-db-creds +``` + +### Creating a Keycloak Admin User + +The Keycloak Docker image supports creating an initial admin user. +It must be configured via environment variables: + +* `KEYCLOAK_USER` or `KEYCLOAK_USER_FILE` +* `KEYCLOAK_PASSWORD` or `KEYCLOAK_PASSWORD_FILE` + +Please refer to the section on database configuration for how to configure a secret for this. + +### High Availability and Clustering + +For high availability, Keycloak must be run with multiple replicas (`replicas > 1`). +The chart has a helper template (`keycloak.serviceDnsName`) that creates the DNS name based on the headless service. + +#### DNS_PING Service Discovery + +JGroups discovery via DNS_PING can be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . }}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +#### KUBE_PING Service Discovery + +Recent versions of Keycloak include a new Kubernetes native [KUBE_PING](https://github.com/jgroups-extras/jgroups-kubernetes) service discovery protocol. +This requires a little more configuration than DNS_PING but can easily be achieved with the Helm chart. 
+ +As with DNS_PING some environment variables must be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +However, the Keycloak Pods must also get RBAC permissions to `get` and `list` Pods in the namespace which can be configured as follows: + +```yaml +rbac: + create: true + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +``` + +#### Autoscaling + +Due to the caches in Keycloak only replicating to a few nodes (two in the example configuration above) and the limited controls around autoscaling built into Kubernetes, it has historically been problematic to autoscale Keycloak. +However, in Kubernetes 1.18 [additional controls were introduced](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior) which make it possible to scale down in a more controlled manner. + +The example autoscaling configuration in the values file scales from three up to a maximum of ten Pods using CPU utilization as the metric. Scaling up is done as quickly as required but scaling down is done at a maximum rate of one Pod per five minutes. + +Autoscaling can be enabled as follows: + +```yaml +autoscaling: + enabled: true +``` + +KUBE_PING service discovery seems to be the most reliable mechanism to use when enabling autoscaling, due to being faster than DNS_PING at detecting changes in the cluster. 
+ +### Running Keycloak Behind a Reverse Proxy + +When running Keycloak behind a reverse proxy, which is the case when using an ingress controller, +proxy address forwarding must be enabled as follows: + +```yaml +extraEnv: | + - name: PROXY_ADDRESS_FORWARDING + value: "true" +``` + +### Providing a Custom Theme + +One option is certainly to provide a custom Keycloak image that includes the theme. +However, if you prefer to stick with the official Keycloak image, you can use an init container as theme provider. + +Create your own theme and package it up into a Docker image. + +```docker +FROM busybox +COPY mytheme /mytheme +``` + +In combination with an `emptyDir` that is shared with the Keycloak container, configure an init container that runs your theme image and copies the theme over to the right place where Keycloak will pick it up automatically. + +```yaml +extraInitContainers: | + - name: theme-provider + image: myuser/mytheme:1 + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes/mytheme + +extraVolumes: | + - name: theme + emptyDir: {} +``` + +### Setting a Custom Realm + +A realm can be added by creating a secret or configmap for the realm json file and then supplying this into the chart. +It can be mounted using `extraVolumeMounts` and then referenced as environment variable `KEYCLOAK_IMPORT`. 
+First we need to create a Secret from the realm JSON file using `kubectl create secret generic realm-secret --from-file=realm.json` which we need to reference in `values.yaml`: + +```yaml +extraVolumes: | + - name: realm-secret + secret: + secretName: realm-secret + +extraVolumeMounts: | + - name: realm-secret + mountPath: "/realm/" + readOnly: true + +extraEnv: | + - name: KEYCLOAK_IMPORT + value: /realm/realm.json +``` + +Alternatively, the realm file could be added to a custom image. + +After startup the web admin console for the realm should be available on the path /auth/admin/\/console/. + +### Using Google Cloud SQL Proxy + +Depending on your environment you may need a local proxy to connect to the database. +This is, e. g., the case for Google Kubernetes Engine when using Google Cloud SQL. +Create the secret for the credentials as documented [here](https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine) and configure the proxy as a sidecar. + +Because `extraContainers` is a string that is passed through the `tpl` function, it is possible to create custom values and use them in the string. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +# Custom values for Google Cloud SQL +cloudsql: + project: my-project + region: europe-west1 + instance: my-instance + +extraContainers: | + - name: cloudsql-proxy + image: gcr.io/cloudsql-docker/gce-proxy:1.17 + command: + - /cloud_sql_proxy + args: + - -instances={{ .Values.cloudsql.project }}:{{ .Values.cloudsql.region }}:{{ .Values.cloudsql.instance }}=tcp:5432 + - -credential_file=/secrets/cloudsql/credentials.json + volumeMounts: + - name: cloudsql-creds + mountPath: /secrets/cloudsql + readOnly: true + +extraVolumes: | + - name: cloudsql-creds + secret: + secretName: cloudsql-instance-credentials + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: "127.0.0.1" + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: postgres + - name: DB_USER + value: myuser + - name: DB_PASSWORD + value: mypassword +``` + +### Changing the Context Path + +By default, Keycloak is served under context `/auth`. 
+This can be changed as follows: + +```yaml +contextPath: mycontext + +startupScripts: + # cli script that reconfigures WildFly + contextPath.cli: | + embed-server --server-config=standalone-ha.xml --std-out=echo + batch + {{- if ne .Values.contextPath "auth" }} + /subsystem=keycloak-server/:write-attribute(name=web-context,value={{ if eq .Values.contextPath "" }}/{{ else }}{{ .Values.contextPath }}{{ end }}) + {{- if eq .Values.contextPath "" }} + /subsystem=undertow/server=default-server/host=default-host:write-attribute(name=default-web-module,value=keycloak-server.war) + {{- end }} + {{- end }} + run-batch + stop-embedded-server + +livenessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +readinessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +The above YAML references introduces the custom value `contextPath` which is possible because `startupScripts`, `livenessProbe`, and `readinessProbe` are templated using the `tpl` function. +Note that it must not start with a slash. +Alternatively, you may supply it via CLI flag: + +```console +--set-string contextPath=mycontext +``` + +### Prometheus Metrics Support + +#### WildFly Metrics + +WildFly can expose metrics on the management port. +In order to achieve this, the environment variable `KEYCLOAK_STATISTICS` must be set. + +```yaml +extraEnv: | + - name: KEYCLOAK_STATISTICS + value: all +``` + +Add a ServiceMonitor if using prometheus-operator: + +```yaml +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Checkout `values.yaml` for customizing the ServiceMonitor and for adding custom Prometheus rules. 
+ +Add annotations if you don't use prometheus-operator: + +```yaml +service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9990" +``` + +#### Keycloak Metrics SPI + +Optionally, it is possible to add [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) via init container. + +A separate `ServiceMonitor` can be enabled to scrape metrics from the SPI: + +```yaml +extraServiceMonitor: + # If `true`, an additional ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Check out `values.yaml` for customizing this ServiceMonitor. + +Note that the metrics endpoint is exposed on the HTTP port. +You may want to restrict access to it in your ingress controller configuration. +For ingress-nginx, this could be done as follows: + +```yaml +annotations: + nginx.ingress.kubernetes.io/server-snippet: | + location ~* /auth/realms/[^/]+/metrics { + return 403; + } +``` + +## Why StatefulSet? + +The chart sets node identifiers to the system property `jboss.node.name` which is in fact the pod name. +Node identifiers must not be longer than 23 characters. +This can be problematic because pod names are quite long. +We would have to truncate the chart's fullname to six characters because pods get a 17-character suffix (e. g. `-697f8b7655-mf5ht`). +Using a StatefulSet allows us to truncate to 20 characters leaving room for up to 99 replicas, which is much better. +Additionally, we get stable values for `jboss.node.name` which can be advantageous for cluster discovery. +The headless service that governs the StatefulSet is used for DNS discovery via DNS_PING. + +## Upgrading + +### From chart < 10.0.0 + +* Keycloak is updated to 12.0.4 + +The upgrade should be seamless. +No special care has to be taken. + +### From chart versions < 9.0.0 + +The Keycloak chart received a major facelift and, thus, comes with breaking changes. +Opinionated stuff and things that are now baked into Keycloak's Docker image were removed. 
+Configuration is more generic making it easier to use custom Docker images that are configured differently than the official one. + +* Values are no longer nested under `keycloak`. +* Besides setting the node identifier, no CLI changes are performed out of the box +* Environment variables for the PostgreSQL dependency are set automatically if enabled. + Otherwise, no environment variables are set by default. +* Optionally enables creating RBAC resources with configurable rules (e. g. for KUBE_PING) +* PostgreSQL chart dependency is updated to 9.1.1 + +### From chart versions < 8.0.0 + +* Keycloak is updated to 10.0.0 +* PostgreSQL chart dependency is updated to 8.9.5 + +The upgrade should be seamless. +No special care has to be taken. + +### From chart versions < 7.0.0 + +Version 7.0.0 update breaks backwards-compatibility with the existing `keycloak.persistence.existingSecret` scheme. + +#### Changes in Configuring Database Credentials from an Existing Secret + +Both `DB_USER` and `DB_PASS` are always read from a Kubernetes Secret. +This is a requirement if you are provisioning database credentials dynamically - either via an Operator or some secret-management engine. + +The variable referencing the password key name has been renamed from `keycloak.persistence.existingSecretKey` to `keycloak.persistence.existingSecretPasswordKey` + +A new, optional variable for referencing the username key name for populating the `DB_USER` env has been added: +`keycloak.persistence.existingSecretUsernameKey`. + +If `keycloak.persistence.existingSecret` is left unset, a new Secret will be provisioned populated with the `dbUser` and `dbPassword` Helm variables. + +###### Example configuration: +```yaml +keycloak: + persistence: + existingSecret: keycloak-provisioned-db-credentials + existingSecretPasswordKey: PGPASSWORD + existingSecretUsernameKey: PGUSER + ... 
+``` +### From chart versions < 6.0.0 + +#### Changes in Probe Configuration + +Now both readiness and liveness probes are configured as strings that are then passed through the `tpl` function. +This allows for greater customizability of the readiness and liveness probes. + +The defaults are unchanged, but since 6.0.0 configured as follows: + +```yaml + livenessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + readinessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +#### Changes in Existing Secret Configuration + +This can be useful if you create a secret in a parent chart and want to reference that secret. +Applies to `keycloak.existingSecret` and `keycloak.persistence.existingSecret`. + +_`values.yaml` of parent chart:_ +```yaml +keycloak: + keycloak: + existingSecret: '{{ .Release.Name }}-keycloak-secret' +``` + +#### HTTPS Port Added + +The HTTPS port was added to the pod and to the services. +As a result, service ports are now configured differently. + + +### From chart versions < 5.0.0 + +Version 5.0.0 is a major update. + +* The chart now follows the new Kubernetes label recommendations: +https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +* Several changes to the StatefulSet render an out-of-the-box upgrade impossible because StatefulSets only allow updates to a limited set of fields +* The chart uses the new support for running scripts at startup that has been added to Keycloak's Docker image. +If you use this feature, you will have to adjust your configuration + +However, with the following manual steps an automatic upgrade is still possible: + +1. Adjust chart configuration as necessary (e. g. startup scripts) +1. 
Perform a non-cascading deletion of the StatefulSet which keeps the pods running +1. Add the new labels to the pods +1. Run `helm upgrade` + +Use a script like the following to add labels and to delete the StatefulSet: + +```console +#!/bin/sh + +release= +namespace= + +kubectl delete statefulset -n "$namespace" -l app=keycloak -l release="$release" --cascade=false + +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/name=keycloak +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/instance="$release" +``` + +**NOTE:** Version 5.0.0 also updates the Postgresql dependency which has received a major upgrade as well. +In case you use this dependency, the database must be upgraded first. +Please refer to the Postgresql chart's upgrading section in its README for instructions. diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/.helmignore b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/Chart.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/Chart.yaml new file mode 100644 index 0000000..48d8f2f --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.8.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 9.1.1 diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/README.md b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/README.md new file mode 100644 index 0000000..c84cc7b --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/README.md @@ -0,0 +1,625 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+ +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR; + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | 
Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case`postgres` is the admin username). 
| _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node labels 
for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | 
`true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. 
|`nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List |`nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. 
+ +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. 
If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the cetificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `securityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. 
+ +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. 
In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please covert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. 
Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. 
Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. 
It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. 
Basically, create a database dump in the old chart, move and restore it in the new one.
+
+### 4.0.0
+
+This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately.
+
+IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error
+
+```
+The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development
+```
+
+### 3.0.0
+
+This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
+It also fixes an issue with the `postgresql.master.fullname` helper template not obeying fullnameOverride.
+
+#### Breaking changes
+
+- `affinity` has been renamed to `master.affinity` and `slave.affinity`.
+- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
+- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
+
+### 2.0.0
+
+In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the steps below:
+
+ - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running
+
+```console
+$ kubectl get svc
+```
+
+- Install (not upgrade) the new version
+
+```console
+$ helm repo update
+$ helm install my-release bitnami/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file from the previous database using `pg_dump`; for that, we should connect to the previous postgresql chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command you should be prompted for a password; this password is the previous chart password (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below.
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 0000000..b4d8828 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 0.3.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: http://www.bitnami.com/ +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +version: 0.3.1 diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/README.md b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/README.md new file mode 100644 index 0000000..ab50967 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/README.md @@ -0,0 +1,228 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR; + +```yaml +dependencies: + - name: common + version: 0.1.0 + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +**Names** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +**Images** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $` | + +**Labels** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +**Storage** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +**TplValues** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frecuently is the chart context `$` or `.` | + +**Capabilities** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +**Warnings** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +**Secrets** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. 
+ example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +**Example of use** + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possiblity of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +## Notable changes + +N/A diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..c0ea2c7 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..ee6673a --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,44 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + 
{{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $) }} +*/}} +{{- define "common.images.pullSecrets" -}} +{{- if .global }} +{{- if .global.imagePullSecrets }} +imagePullSecrets: + {{- range .global.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- else }} +{{- $pullSecrets := list }} +{{- range .images }} + {{- if .pullSecrets }} + {{- $pullSecrets = append $pullSecrets .pullSecrets }} + {{- end }} +{{- end }} +{{- if $pullSecrets }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..d6165a2 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. 
+ +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = cat $name .defaultNameSuffix -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 0000000..a936299 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,4 @@ +commonAnnotations: + helm.sh/hook: "pre-install, pre-upgrade" + helm.sh/hook-weight: "-1" + diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 0000000..347d3b4 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/README.md b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/README.md new file mode 100644 index 0000000..1813a2f --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. 
diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md new file mode 100644 index 0000000..184c187 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..cba3809 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. 
\ No newline at end of file diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.lock b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.lock new file mode 100644 index 0000000..1069b62 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.3.1 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-07-15T00:56:02.067804177Z" diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.yaml new file mode 100644 index 0000000..868eee6 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..6dec604 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,54 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.imxc.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . 
}}-read.imxc.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace imxc --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . 
}}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..a7008a1 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,494 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- 
.Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . 
| quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. 
+*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml new file mode 100644 index 0000000..b29ef60 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 0000000..f21a976 --- /dev/null +++ 
b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 0000000..6637867 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . 
}} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 0000000..6b7a317 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 0000000..b993c99 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 0000000..2a7b372 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..da0b3ab --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 0000000..b0c41b1 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled 
.Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/pv.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/pv.yaml new file mode 100644 index 0000000..ddd7d7c --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/pv.yaml @@ -0,0 +1,27 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: keycloak-saas +spec: + storageClassName: manual + capacity: + storage: 8Gi + accessModes: + - ReadWriteOnce + #- ReadWriteMany + hostPath: + #path: "/home/keycloak/keycloak" + path: /mnt/keycloak-postgresql + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + #- imxc-worker1 + - {{ .Values.node.affinity }} + claimRef: + name: data-keycloak-saas-postgresql-0 + #namespace: auth + diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/role.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/role.yaml new file mode 100644 index 0000000..6d3cf50 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role 
+apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 0000000..b7daa2a --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..c93dbe0 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) 
}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..17f7ff3 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 0000000..3e643e1 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 0000000..a712a03 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,340 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . 
}} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if 
.Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 0000000..35c6293 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,510 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "postgresql.tplValue" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . 
}} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if 
.Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "postgresql.tplValue" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: 
{{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + #claimName: {{ tpl . 
$ }} + claimName: data-keycloak-saas-postgresql-0 +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 0000000..4913157 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 0000000..885c7bb --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: slave +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc.yaml new file mode 100644 index 0000000..e9fc504 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values-production.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values-production.yaml new file mode 100644 index 0000000..a43670f --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values-production.yaml @@ -0,0 +1,591 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: 10.10.31.243:5000 # docker.io + repository: postgresql # bitnami/postgresql + tag: 11.8.0-debian-10-r61 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master 
Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: 
{} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.schema.json b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.schema.json new file mode 100644 index 0000000..7b5e2ef --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + 
"$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.yaml b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.yaml new file 
mode 100644 index 0000000..5f831ef --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/charts/postgresql/values.yaml @@ -0,0 +1,604 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + #registry: cdm-dev.exem-oss.org/keycloak + registry: 10.10.31.243:5000/keycloak # registry.openstacklocal:5000/keycloak + repository: keycloak-postgresql + tag: 11.8.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + #registry: cdm-dev.exem-oss.org + registry: 10.10.31.243:5000 # registry.openstacklocal:5000 + repository: minideb # keycloak/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data +#postgresqlDataDir: /var/lib/postgresql/data/pgdata + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PostgreSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ignored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + storageClass: "" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + 
## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + 
requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +#persistentVolume nodeAffinity Value Require this value +node: + affinity: imxc-worker1 diff --git a/roles/cmoa_demo_install/files/04-keycloak/ci/h2-values.yaml b/roles/cmoa_demo_install/files/04-keycloak/ci/h2-values.yaml new file mode 100644 index 0000000..10d1705 --- /dev/null +++ 
b/roles/cmoa_demo_install/files/04-keycloak/ci/h2-values.yaml @@ -0,0 +1,38 @@ +extraEnv: | + - name: DB_VENDOR + value: h2 + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + annotations: + my-test-annotation: Test secret for {{ include "keycloak.fullname" . }} + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: false + +test: + enabled: true diff --git a/roles/cmoa_demo_install/files/04-keycloak/ci/postgres-ha-values.yaml b/roles/cmoa_demo_install/files/04-keycloak/ci/postgres-ha-values.yaml new file mode 100644 index 0000000..e92c2c7 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/ci/postgres-ha-values.yaml @@ -0,0 +1,73 @@ +replicas: 2 + +podLabels: + test-label: test-label-value + +podAnnotations: + test-annotation: test-annotation-value-{{ .Release.Name }} + test-int-annotation: "12345" + +startupScripts: + hello.sh: | + #!/bin/sh + + echo '********************************************************************************' + echo '* *' + echo '* Hello from my startup script! *' + echo '* *' + echo '********************************************************************************' + +lifecycleHooks: | + postStart: + exec: + command: + - /bin/sh + - -c + - echo 'Hello from lifecycle hook!' + +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . 
}}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: KEYCLOAK_STATISTICS + value: all + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: true + persistence: + enabled: true + +test: + enabled: true diff --git a/roles/cmoa_demo_install/files/04-keycloak/requirements.lock b/roles/cmoa_demo_install/files/04-keycloak/requirements.lock new file mode 100644 index 0000000..4231a57 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.1 +digest: sha256:33ee9e6caa9e519633071fd71aedd9de7906b9a9d7fb629eb814d9f72bb8d68e +generated: "2020-07-24T07:40:55.78753+02:00" diff --git a/roles/cmoa_demo_install/files/04-keycloak/requirements.yaml b/roles/cmoa_demo_install/files/04-keycloak/requirements.yaml new file mode 100644 index 0000000..f3409a3 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: postgresql + version: 9.1.1 + repository: https://charts.bitnami.com/bitnami + condition: postgresql.enabled diff --git a/roles/cmoa_demo_install/files/04-keycloak/scripts/keycloak.cli b/roles/cmoa_demo_install/files/04-keycloak/scripts/keycloak.cli new file mode 100644 index 0000000..1469963 --- /dev/null +++ 
b/roles/cmoa_demo_install/files/04-keycloak/scripts/keycloak.cli @@ -0,0 +1,13 @@ +embed-server --server-config=standalone-ha.xml --std-out=echo +batch + +echo Configuring node identifier + +## Sets the node identifier to the node name (= pod name). Node identifiers have to be unique. They can have a +## maximum length of 23 characters. Thus, the chart's fullname template truncates its length accordingly. +/subsystem=transactions:write-attribute(name=node-identifier, value=${jboss.node.name}) + +echo Finished configuring node identifier + +run-batch +stop-embedded-server diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/NOTES.txt b/roles/cmoa_demo_install/files/04-keycloak/templates/NOTES.txt new file mode 100644 index 0000000..e76e064 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/NOTES.txt @@ -0,0 +1,61 @@ +*********************************************************************** +* * +* Keycloak Helm Chart by codecentric AG * +* * +*********************************************************************** + +{{- if .Values.ingress.enabled }} + +Keycloak was installed with an Ingress and can be reached at the following URL(s): +{{ range $unused, $rule := .Values.ingress.rules }} + {{- range $rule.paths }} + - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $rule.host }}{{ . }} + {{- end }} +{{- end }} + +{{- else if eq "NodePort" .Values.service.type }} + +Keycloak was installed with a Service of type NodePort. +{{ if .Values.service.httpNodePort }} +Get its HTTP URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . 
}}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"http\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "http://$NODE_IP:$NODE_PORT" +{{- end }} +{{ if .Values.service.httpsNodePort }} +Get its HTTPS URL with the following commands: + +export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . }}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"https\" }}{{ .nodePort }}{{ end }}{{ end }}"}}') +export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") +echo "https://$NODE_IP:$NODE_PORT" +{{- end }} + +{{- else if eq "LoadBalancer" .Values.service.type }} + +Keycloak was installed with a Service of type LoadBalancer + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace imxc service -w {{ include "keycloak.fullname" . }}' + +Get its HTTP URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "http://$SERVICE_IP:{{ .Values.service.httpPort }}" + +Get its HTTPS URL with the following commands: + +export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") +echo "https://$SERVICE_IP:{{ .Values.service.httpsPort }}" + +{{- else if eq "ClusterIP" .Values.service.type }} + +Keycloak was installed with a Service of type ClusterIP + +Create a port-forwarding with the following commands: + +export POD_NAME=$(kubectl get pods --namespace imxc -l "app.kubernetes.io/name={{ include "keycloak.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o name) +echo "Visit http://127.0.0.1:8080 to use your application" +kubectl --namespace imxc port-forward "$POD_NAME" 8080 + +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/_helpers.tpl b/roles/cmoa_demo_install/files/04-keycloak/templates/_helpers.tpl new file mode 100644 index 0000000..d019e17 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/_helpers.tpl @@ -0,0 +1,87 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "keycloak.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate to 20 characters because this is used to set the node identifier in WildFly which is limited to +23 characters. This allows for a replica suffix for up to 99 replicas. +*/}} +{{- define "keycloak.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 20 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "keycloak.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "keycloak.labels" -}} +helm.sh/chart: {{ include "keycloak.chart" . }} +{{ include "keycloak.selectorLabels" . }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "keycloak.selectorLabels" -}} +app.kubernetes.io/name: {{ include "keycloak.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "keycloak.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "keycloak.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for the postgres requirement. +*/}} +{{- define "keycloak.postgresql.fullname" -}} +{{- $postgresContext := dict "Values" .Values.postgresql "Release" .Release "Chart" (dict "Name" "postgresql") -}} +{{ include "postgresql.fullname" $postgresContext }} +{{- end }} + +{{/* +Create the service DNS name. +*/}} +{{- define "keycloak.serviceDnsName" -}} +{{ include "keycloak.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "keycloak.ingressAPIVersion" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- print "networking.k8s.io/v1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/configmap-startup.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/configmap-startup.yaml new file mode 100644 index 0000000..8fbb462 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/configmap-startup.yaml @@ -0,0 +1,14 @@ +{{- if .Values.startupScripts }} +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-startup + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} +data: + {{- range $key, $value := .Values.startupScripts }} + {{ $key }}: | + {{- tpl $value $ | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/hpa.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/hpa.yaml new file mode 100644 index 0000000..c772b76 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/hpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.autoscaling.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "keycloak.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- toYaml .Values.autoscaling.metrics | nindent 4 }} + behavior: + {{- toYaml .Values.autoscaling.behavior | nindent 4 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/ingress.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/ingress.yaml new file mode 100644 index 0000000..d749e24 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/ingress.yaml @@ -0,0 +1,104 @@ +{{- $ingress := .Values.ingress -}} +{{- if $ingress.enabled -}} +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- if $ingress.console.enabled }} +--- +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }}-console + {{- with $ingress.console.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.console.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/networkpolicy.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/networkpolicy.yaml new file mode 100644 index 0000000..5e7c7b6 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/networkpolicy.yaml @@ -0,0 +1,46 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "keycloak.fullname" . | quote }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.networkPolicy.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + ingress: + {{- with .Values.networkPolicy.extraFrom }} + - from: + {{- toYaml . | nindent 8 }} + ports: + - protocol: TCP + port: {{ $.Values.service.httpPort }} + - protocol: TCP + port: {{ $.Values.service.httpsPort }} + {{ range $.Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} + {{- end }} + - from: + - podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . 
| nindent 14 }} + ports: + - protocol: TCP + port: {{ .Values.service.httpPort }} + - protocol: TCP + port: {{ .Values.service.httpsPort }} + - protocol: TCP + port: {{ .Values.service.httpManagementPort }} + {{ range .Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/poddisruptionbudget.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..39cc390 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/poddisruptionbudget.yaml @@ -0,0 +1,13 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/prometheusrule.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/prometheusrule.yaml new file mode 100644 index 0000000..69af5e7 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- with .Values.prometheusRule -}} +{{- if .enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "keycloak.fullname" $ }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "keycloak.fullname" $ }} + rules: + {{- toYaml .rules | nindent 8 }} +{{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/rbac.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/rbac.yaml new file mode 100644 index 0000000..9ca0a2b --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/rbac.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create .Values.rbac.rules }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +rules: + {{- toYaml .Values.rbac.rules | nindent 2 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "keycloak.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "keycloak.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/route.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/route.yaml new file mode 100644 index 0000000..9507d56 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/route.yaml @@ -0,0 +1,34 @@ +{{- $route := .Values.route -}} +{{- if $route.enabled -}} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $route.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $route.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $route.host }} + host: {{ tpl $route.host $ | quote }} +{{- end }} + path: {{ $route.path }} + port: + targetPort: http + to: + kind: Service + name: {{ include "keycloak.fullname" $ }}-http + weight: 100 + {{- if $route.tls.enabled }} + tls: + insecureEdgeTerminationPolicy: {{ $route.tls.insecureEdgeTerminationPolicy }} + termination: {{ $route.tls.termination }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/secrets.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/secrets.yaml new file mode 100644 index 0000000..c1cb796 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/secrets.yaml @@ -0,0 +1,29 @@ +{{- range $nameSuffix, $values := .Values.secrets -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $nameSuffix }} + {{- with $values.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := $values.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +type: {{ default "Opaque" $values.type }} +{{- with $values.data }} +data: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with $values.stringData }} +stringData: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 2 }} + {{- end }} +{{- end }} +--- +{{- end -}} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/service-headless.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/service-headless.yaml new file mode 100644 index 0000000..0c22ec9 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/service-headless.yaml @@ -0,0 +1,18 @@ +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-headless + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: headless +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + protocol: TCP + selector: + {{- include "keycloak.selectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/service-http.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/service-http.yaml new file mode 100644 index 0000000..c4a1dc9 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/service-http.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-http + {{- with .Values.service.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := .Values.service.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: http +spec: + type: {{ .Values.service.type }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- with .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpNodePort }} + nodePort: {{ .Values.service.httpNodePort }} + {{- end }} + protocol: TCP + - name: https + port: {{ .Values.service.httpsPort }} + targetPort: https + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpsNodePort }} + nodePort: {{ .Values.service.httpsNodePort }} + {{- end }} + protocol: TCP + - name: http-management + port: {{ .Values.service.httpManagementPort }} + targetPort: http-management + {{- if and (eq "NodePort" .Values.service.type) .Values.service.httpManagementNodePort }} + nodePort: {{ .Values.service.httpManagementNodePort }} + {{- end }} + protocol: TCP + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "keycloak.selectorLabels" . 
| nindent 4 }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/serviceaccount.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/serviceaccount.yaml new file mode 100644 index 0000000..1d8f3f0 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "keycloak.serviceAccountName" . }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.serviceAccount.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +imagePullSecrets: + {{- toYaml .Values.serviceAccount.imagePullSecrets | nindent 4 }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/servicemonitor.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/servicemonitor.yaml new file mode 100644 index 0000000..ba97f62 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- range $key, $serviceMonitor := dict "wildfly" .Values.serviceMonitor "extra" .Values.extraServiceMonitor }} +{{- with $serviceMonitor }} +{{- if .enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $key }} + {{- with .namespace }} + namespace: {{ . }} + {{- end }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + {{- with .namespaceSelector }} + namespaceSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "keycloak.selectorLabels" $ | nindent 6 }} + app.kubernetes.io/component: http + endpoints: + - port: {{ .port }} + path: {{ .path }} + interval: {{ .interval }} + scrapeTimeout: {{ .scrapeTimeout }} +{{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/statefulset.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/statefulset.yaml new file mode 100644 index 0000000..8278986 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/statefulset.yaml @@ -0,0 +1,208 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with .Values.statefulsetAnnotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.statefulsetLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicas }} + {{- end }} + serviceName: {{ include "keycloak.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config-startup: {{ include (print .Template.BasePath "/configmap-startup.yaml") . | sha256sum }} + checksum/secrets: {{ tpl (toYaml .Values.secrets) . 
| sha256sum }} + {{- range $key, $value := .Values.podAnnotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + labels: + {{- include "keycloak.selectorLabels" . | nindent 8 }} + {{- if and .Values.postgresql.enabled (and .Values.postgresql.networkPolicy .Values.postgresql.networkPolicy.enabled) }} + {{ include "keycloak.postgresql.fullname" . }}-client: "true" + {{- end }} + {{- range $key, $value := .Values.podLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + spec: + {{- if or .Values.postgresql.enabled .Values.extraInitContainers }} + initContainers: + {{- if .Values.postgresql.enabled }} + - name: pgchecker + image: "{{ .Values.pgchecker.image.repository }}:{{ .Values.pgchecker.image.tag }}" + imagePullPolicy: {{ .Values.pgchecker.image.pullPolicy }} + securityContext: + {{- toYaml .Values.pgchecker.securityContext | nindent 12 }} + command: + - sh + - -c + - | + echo 'Waiting for PostgreSQL to become ready...' + + until printf "." && nc -z -w 2 {{ include "keycloak.postgresql.fullname" . }} {{ .Values.postgresql.service.port }}; do + sleep 2; + done; + + echo 'PostgreSQL OK ✓' + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + resources: + {{- toYaml .Values.pgchecker.resources | nindent 12 }} + {{- end }} + {{- with .Values.extraInitContainers }} + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: keycloak + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + {{- toYaml .Values.command | nindent 12 }} + args: + {{- toYaml .Values.args | nindent 12 }} + {{- with .Values.lifecycleHooks }} + {{- tpl . 
$ | nindent 12 }} + {{- end }} + env: + - name: KEYCLOAK_USER + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_USERNAME + - name: KEYCLOAK_PASSWORD + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_PASSWORD + {{- if .Values.postgresql.enabled }} + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: {{ include "keycloak.postgresql.fullname" . }} + - name: DB_PORT + value: {{ .Values.postgresql.service.port | quote }} + - name: DB_DATABASE + value: {{ .Values.postgresql.postgresqlDatabase | quote }} + - name: DB_USER + value: {{ .Values.postgresql.postgresqlUsername | quote }} + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.postgresql.fullname" . }} + key: postgresql-password + {{- end }} + {{- with .Values.extraEnv }} + {{- tpl . $ | nindent 12 }} + {{- end }} + envFrom: + {{- with .Values.extraEnvFrom }} + {{- tpl . $ | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: https + containerPort: 8443 + protocol: TCP + - name: http-management + containerPort: 9990 + protocol: TCP + {{- with .Values.extraPorts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + {{- range $key, $value := .Values.startupScripts }} + - name: startup + mountPath: "/opt/jboss/startup-scripts/{{ $key }}" + subPath: "{{ $key }}" + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.extraContainers }} + {{- tpl . 
$ | nindent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "keycloak.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- with .Values.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + restartPolicy: {{ .Values.restartPolicy }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: + - name: themes-upper-directory + hostPath: + path: /root/oci/infra-set/keycloak/keycloak_theme/ + type: DirectoryOrCreate + {{- with .Values.startupScripts }} + - name: startup + configMap: + name: {{ include "keycloak.fullname" $ }}-startup + defaultMode: 0555 + items: + {{- range $key, $value := . }} + - key: {{ $key }} + path: {{ $key }} + {{- end }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- tpl . $ | nindent 8 }} + {{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/test/configmap-test.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/test/configmap-test.yaml new file mode 100644 index 0000000..8dda781 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/test/configmap-test.yaml @@ -0,0 +1,50 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-test + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + annotations: + helm.sh/hook: test + helm.sh/hook-delete-policy: hook-succeeded +data: + test.py: | + import os + from selenium import webdriver + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions + from urllib.parse import urlparse + + print('Creating PhantomJS driver...') + driver = webdriver.PhantomJS(service_log_path='/tmp/ghostdriver.log') + + base_url = 'http://{{ include "keycloak.fullname" . }}-http{{ if ne 80 (int .Values.service.httpPort) }}:{{ .Values.service.httpPort }}{{ end }}' + + print('Opening Keycloak...') + driver.get('{0}/auth/admin/'.format(base_url)) + + username = os.environ['KEYCLOAK_USER'] + password = os.environ['KEYCLOAK_PASSWORD'] + + username_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "username"))) + password_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "password"))) + login_button = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "kc-login"))) + + print('Entering username...') + username_input.send_keys(username) + + print('Entering password...') + password_input.send_keys(password) + + print('Clicking login button...') + login_button.click() + + WebDriverWait(driver, 30).until(lambda driver: '/auth/admin/master/console/' in driver.current_url) + + print('Admin console visible. Login successful.') + + driver.quit() + + {{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/templates/test/pod-test.yaml b/roles/cmoa_demo_install/files/04-keycloak/templates/test/pod-test.yaml new file mode 100644 index 0000000..5b166f2 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/templates/test/pod-test.yaml @@ -0,0 +1,43 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "keycloak.fullname" . 
}}-test + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: test + annotations: + helm.sh/hook: test +spec: + securityContext: + {{- toYaml .Values.test.podSecurityContext | nindent 4 }} + containers: + - name: keycloak-test + image: "{{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}" + imagePullPolicy: {{ .Values.test.image.pullPolicy }} + securityContext: + {{- toYaml .Values.test.securityContext | nindent 8 }} + command: + - python3 + args: + - /tests/test.py + env: + - name: KEYCLOAK_USER + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: user + - name: KEYCLOAK_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: password + volumeMounts: + - name: tests + mountPath: /tests + volumes: + - name: tests + configMap: + name: {{ include "keycloak.fullname" . }}-test + restartPolicy: Never +{{- end }} diff --git a/roles/cmoa_demo_install/files/04-keycloak/values.schema.json b/roles/cmoa_demo_install/files/04-keycloak/values.schema.json new file mode 100644 index 0000000..47c2aa3 --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/values.schema.json @@ -0,0 +1,434 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "required": [ + "image" + ], + "definitions": { + "image": { + "type": "object", + "required": [ + "repository", + "tag" + ], + "properties": { + "pullPolicy": { + "type": "string", + "pattern": "^(Always|Never|IfNotPresent)$" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + } + } + }, + "properties": { + "affinity": { + "type": "string" + }, + "args": { + "type": "array" + }, + "clusterDomain": { + "type": "string" + }, + "command": { + "type": "array" + }, + "enableServiceLinks": { + "type": "boolean" + }, + 
"extraContainers": { + "type": "string" + }, + "extraEnv": { + "type": "string" + }, + "extraEnvFrom": { + "type": "string" + }, + "extraInitContainers": { + "type": "string" + }, + "extraPorts": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "string" + }, + "extraVolumes": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "hostAliases": { + "type": "array" + }, + "image": { + "$ref": "#/definitions/image" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "ingress": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "servicePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "tls": { + "type": "array", + "items": { + "type": "object", + "properties": { + "hosts": { + "type": "array", + "items": { + "items": { + "type": "string" + } + }, + "secretName": { + "type": "string" + } + } + } + } + } + }, + "lifecycleHooks": { + "type": "string" + }, + "livenessProbe": { + "type": "string" + }, + "nameOverride": { + "type": "string" + }, + "nodeSelector": { + "type": "object" + }, + "pgchecker": { + "type": "object", + "properties": { + "image": { + "$ref": "#/definitions/image" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "securityContext": { + "type": "object" + } + } + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "type": "object" + }, + 
"podLabels": { + "type": "object" + }, + "podManagementPolicy": { + "type": "string" + }, + "podSecurityContext": { + "type": "object" + }, + "postgresql": { + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "prometheusRule": { + "type": "object" + }, + "serviceMonitor": { + "type": "object" + }, + "extraServiceMonitor": { + "type": "object" + }, + "readinessProbe": { + "type": "string" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "restartPolicy": { + "type": "string" + }, + "route": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "host": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "path": { + "type": "string" + }, + "tls": { + "type": "object" + } + } + }, + "secrets": { + "type": "object" + }, + "securityContext": { + "type": "object" + }, + "service": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "extraPorts": { + "type": "array" + }, + "loadBalancerSourceRanges": { + "type": "array" + }, + "httpNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpPort": { + "type": "integer" + }, + "httpsNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpsPort": { + "type": "integer" + }, + "httpManagementNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpManagementPort": { + "type": "integer" + }, + "labels": { + "type": "object" + }, + "nodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "type": { + "type": "string" + }, + "loadBalancerIP": { + "type": "string" + }, + "sessionAffinity": { + "type": "string" + }, + "sessionAffinityConfig": { + "type": "object" + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "create": { + "type": 
"boolean" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "labels": { + "type": "object" + }, + "name": { + "type": "string" + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "rules": { + "type": "array" + } + } + }, + "startupScripts": { + "type": "object" + }, + "statefulsetAnnotations": { + "type": "object" + }, + "statefulsetLabels": { + "type": "object" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "autoscaling": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "metrics": { + "type": "array" + }, + "behavior": { + "type": "object" + } + } + }, + "test": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "image": { + "$ref": "#/definitions/image" + }, + "podSecurityContext": { + "type": "object" + }, + "securityContext": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array" + } + } + } +} diff --git a/roles/cmoa_demo_install/files/04-keycloak/values.yaml b/roles/cmoa_demo_install/files/04-keycloak/values.yaml new file mode 100644 index 0000000..a95521f --- /dev/null +++ b/roles/cmoa_demo_install/files/04-keycloak/values.yaml @@ -0,0 +1,552 @@ +# Optionally override the fully qualified name +fullnameOverride: "imxc-keycloak" + +# Optionally override the name +nameOverride: "" + +# The number of replicas to create (has no effect if autoscaling enabled) +replicas: 2 + +image: + # The Keycloak image repository + #repository: cdm-dev.exem-oss.org/keycloak/keycloak + repository: 10.10.31.243:5000/cmoa3/keycloak + # Overrides the Keycloak image tag whose default is the chart version + tag: "11.0.1" + # The Keycloak image pull policy + pullPolicy: Always + +# Image pull secrets for the Pod +#imagePullSecrets: [] +# - name: myRegistrKeySecretName 
+imagePullSecrets: + - name: regcred + +# Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files +hostAliases: [] +# - ip: "1.2.3.4" +# hostnames: +# - "my.host.com" + +# Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links +enableServiceLinks: true + +# Pod management policy. One of `Parallel` or `OrderedReady` +podManagementPolicy: Parallel + +# Pod restart policy. One of `Always`, `OnFailure`, or `Never` +restartPolicy: Always + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + # Additional annotations for the ServiceAccount + annotations: {} + # Additional labels for the ServiceAccount + labels: {} + # Image pull secrets that are attached to the ServiceAccount + #imagePullSecrets: [] + imagePullSecrets: + - name: regcred + +rbac: + create: true + rules: + # RBAC rules for KUBE_PING + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + +# SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) +podSecurityContext: + fsGroup: 1000 + +# SecurityContext for the Keycloak container +securityContext: + runAsUser: 1000 + runAsNonRoot: true + +# Additional init containers, e. g. for providing custom themes +extraInitContainers: | + - name: theme-provider + image: 10.10.31.243:5000/cmoa3/theme-provider:latest + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme ..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +#extraInitContainers: "" + +# Additional sidecar containers, e. 
g. for a database proxy, such as Google's cloudsql-proxy +extraContainers: "" + +# Lifecycle hooks for the Keycloak container +lifecycleHooks: | +# postStart: +# exec: +# command: +# - /bin/sh +# - -c +# - ls + +# Termination grace period in seconds for Keycloak shutdown. Clusters with a large cache might need to extend this to give Infinispan more time to rebalance +terminationGracePeriodSeconds: 60 + +# The internal Kubernetes cluster domain +clusterDomain: cluster.local + +## Overrides the default entrypoint of the Keycloak container +command: [] + +## Overrides the default args for the Keycloak container +#args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled", "-Dkeycloak.profile.feature.admin_fine_grained_authz=enabled"] +args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled"] + +# Additional environment variables for Keycloak +extraEnv: | + # HA settings + - name: PROXY_ADDRESS_FORWARDING + value: "true" + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + # postgresql settings + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: postgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: keycloak + - name: DB_USER + value: admin + - name: DB_PASSWORD + value: eorbahrhkswp +# - name: KEYCLOAK_USER +# value: keycloak +# - name: KEYCLOAK_PASSWORD +# value: keycloak +#extraEnv: "" + # - name: KEYCLOAK_LOGLEVEL + # value: DEBUG + # - name: WILDFLY_LOGLEVEL + # value: DEBUG + # - name: CACHE_OWNERS_COUNT + # value: "2" + # - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + # value: "2" +#extraEnv: | +# - name: JGROUPS_DISCOVERY_PROTOCOL +# value: dns.DNS_PING +# - name: JGROUPS_DISCOVERY_PROPERTIES +# value: 'dns_query={{ include 
"keycloak.serviceDnsName" . }}' +# - name: CACHE_OWNERS_COUNT +# value: "2" +# - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT +# value: "2" +# Additional environment variables for Keycloak mapped from Secret or ConfigMap +extraEnvFrom: "" + +# Pod priority class name +#priorityClassName: "manual" + +# Pod affinity +affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 10 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 12 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: failure-domain.beta.kubernetes.io/zone + +#affinity: {} + +# Node labels for Pod assignment +nodeSelector: {} + +# Node taints to tolerate +tolerations: [] + +# Additional Pod labels +podLabels: {} + +# Additional Pod annotations +podAnnotations: {} + +# Liveness probe configuration +livenessProbe: | + httpGet: + path: /auth/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +# Readiness probe configuration +readinessProbe: | + httpGet: + path: /auth/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 + +# Pod resource requests and limits +#resources: {} + # requests: + # cpu: "500m" + # memory: "1024Mi" + # limits: + # cpu: "500m" + # memory: "1024Mi" +resources: + requests: + memory: "200Mi" + cpu: "10m" + +# Startup scripts to run before Keycloak starts up +startupScripts: + # WildFly CLI script for configuring the node-identifier + keycloak.cli: | + {{- .Files.Get "scripts/keycloak.cli" }} + # mystartup.sh: | + # #!/bin/sh + # + # echo 'Hello from my custom startup script!' + +# Add additional volumes, e. g. 
for custom themes +extraVolumes: | + - name: theme + emptyDir: {} +#extraVolumes: "" + +# Add additional volumes mounts, e. g. for custom themes +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes +#extraVolumeMounts: "" + +# Add additional ports, e. g. for admin console or exposing JGroups ports +extraPorts: [] + +# Pod disruption budget +podDisruptionBudget: {} +# maxUnavailable: 1 +# minAvailable: 1 + +# Annotations for the StatefulSet +statefulsetAnnotations: {} + +# Additional labels for the StatefulSet +statefulsetLabels: {} + +# Configuration for secrets that should be created +secrets: {} + # mysecret: + # type: {} + # annotations: {} + # labels: {} + # stringData: {} + # data: {} + +service: + # Annotations for headless and HTTP Services + annotations: {} + # Additional labels for headless and HTTP Services + labels: {} + # key: value + # The Service type + type: NodePort + # Optional IP for the load balancer. Used for services of type LoadBalancer only + loadBalancerIP: "" + # The http Service port + httpPort: 80 + # The HTTP Service node port if type is NodePort + httpNodePort: 31082 + # The HTTPS Service port + httpsPort: 8443 + # The HTTPS Service node port if type is NodePort + httpsNodePort: null + # The WildFly management Service port + httpManagementPort: 9990 + # The WildFly management Service node port if type is NodePort + httpManagementNodePort: 31990 + # Additional Service ports, e. g. for custom admin console + extraPorts: [] + # When using Service type LoadBalancer, you can restrict source ranges allowed + # to connect to the LoadBalancer, e. g. 
will result in Security Groups + # (or equivalent) with inbound source ranges allowed to connect + loadBalancerSourceRanges: [] + # Session affinity + # See https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-userspace + sessionAffinity: "" + # Session affinity config + sessionAffinityConfig: {} + +ingress: + # If `true`, an Ingress is created + enabled: false + # The Service port targeted by the Ingress + servicePort: http + # Ingress annotations + annotations: {} + ## Resolve HTTP 502 error using ingress-nginx: + ## See https://www.ibm.com/support/pages/502-error-ingress-keycloak-response + # nginx.ingress.kubernetes.io/proxy-buffer-size: 128k + + # Additional Ingress labels + labels: {} + # List of rules for the Ingress + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - / + # TLS configuration + tls: + - hosts: + - keycloak.example.com + secretName: "" + + # ingress for console only (/auth/admin) + console: + # If `true`, an Ingress is created for console path only + enabled: false + # Ingress annotations for console ingress only + # Useful to set nginx.ingress.kubernetes.io/whitelist-source-range particularly + annotations: {} + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - /auth/admin/ + +## Network policy configuration +networkPolicy: + # If true, the Network policies are deployed + enabled: false + + # Additional Network policy labels + labels: {} + + # Define all other external allowed source + # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#networkpolicypeer-v1-networking-k8s-io + extraFrom: [] + +route: + # If `true`, an OpenShift Route is created + enabled: false + # Path for the Route + path: / + # Route annotations + annotations: {} + # Additional Route labels + labels: {} + # Host name for the Route + host: "" + # TLS configuration + tls: + # If `true`, TLS is enabled 
for the Route + enabled: false + # Insecure edge termination policy of the Route. Can be `None`, `Redirect`, or `Allow` + insecureEdgeTerminationPolicy: Redirect + # TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` + termination: edge + +pgchecker: + image: + # Docker image used to check Postgresql readiness at startup + #repository: cdm-dev.exem-oss.org/keycloak/busybox + #repository: {{ .Values.global.IMXC_REGISTRY }}/keycloak/busybox + repository: 10.10.31.243:5000/cmoa3/busybox + # Image tag for the pgchecker image + tag: 1.32 + # Image pull policy for the pgchecker image + pullPolicy: Always + # SecurityContext for the pgchecker contai/docker.ner + securityContext: + allowPrivilegeEscalation: false + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + # Resource requests and limits for the pgchecker container + resources: + requests: + cpu: "10m" + memory: "16Mi" + limits: + cpu: "10m" + memory: "16Mi" + +postgresql: + # If `true`, the Postgresql dependency is enabled + enabled: false + # PostgreSQL User to create + postgresqlUsername: keycloak + # PostgreSQL Password for the new user + postgresqlPassword: keycloak + # PostgreSQL Database to create + postgresqlDatabase: keycloak + # PostgreSQL network policy configuration + networkPolicy: + enabled: false + +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /metrics + # The Service port at which metrics are served + port: http-management + +extraServiceMonitor: + # If 
`true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /auth/realms/master/metrics + # The Service port at which metrics are served + port: http + +prometheusRule: + # If `true`, a PrometheusRule resource for the prometheus-operator is created + enabled: false + # Annotations for the PrometheusRule + annotations: {} + # Additional labels for the PrometheusRule + labels: {} + # List of rules for Prometheus + rules: [] + # - alert: keycloak-IngressHigh5xxRate + # annotations: + # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 1%. 
+ # expr: | + # ( + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m] + # ) + # ) + # / + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m] + # ) + # ) + # ) * 100 > 1 + # for: 5m + # labels: + # severity: warning + +autoscaling: + # If `true`, a autoscaling/v2beta2 HorizontalPodAutoscaler resource is created (requires Kubernetes 1.18 or above) + # Autoscaling seems to be most reliable when using KUBE_PING service discovery (see README for details) + # This disables the `replicas` field in the StatefulSet + enabled: false + # Additional HorizontalPodAutoscaler labels + labels: {} + # The minimum and maximum number of replicas for the Keycloak StatefulSet + minReplicas: 3 + maxReplicas: 10 + # The metrics to use for scaling + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + # The scaling policy to use. This will scale up quickly but only scale down a single Pod per 5 minutes. + # This is important because caches are usually only replicated to 2 Pods and if one of those Pods is terminated this will give the cluster time to recover. 
+ behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 300 + +test: + # If `true`, test resources are created + enabled: false + image: + # The image for the test Pod + #repository: docker.io/unguiculus/docker-python3-phantomjs-selenium + repository: 10.10.31.243:5000/docker-python3-phantomjs-selenium + # The tag for the test Pod image + tag: v1 + # The image pull policy for the test Pod image + pullPolicy: IfNotPresent + # SecurityContext for the entire test Pod + podSecurityContext: + fsGroup: 1000 + # SecurityContext for the test container + securityContext: + runAsUser: 1000 + runAsNonRoot: true + diff --git a/roles/cmoa_demo_install/files/05-imxc/Chart.yaml b/roles/cmoa_demo_install/files/05-imxc/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/roles/cmoa_demo_install/files/05-imxc/cmoa-manual.yaml b/roles/cmoa_demo_install/files/05-imxc/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/roles/cmoa_demo_install/files/05-imxc/scripts/init-api-server.sh 
b/roles/cmoa_demo_install/files/05-imxc/scripts/init-api-server.sh new file mode 100644 index 0000000..78a9962 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/scripts/init-api-server.sh @@ -0,0 +1,17 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + /sbin/tini -- java -Djava.security.egd=file:/dev/./urandom -jar /app.jar + #java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/roles/cmoa_demo_install/files/05-imxc/scripts/init-auth-server.sh b/roles/cmoa_demo_install/files/05-imxc/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/roles/cmoa_demo_install/files/05-imxc/scripts/init-noti-server.sh b/roles/cmoa_demo_install/files/05-imxc/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/05-imxc/scripts/init-resource.sh b/roles/cmoa_demo_install/files/05-imxc/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/05-imxc/scripts/init.json b/roles/cmoa_demo_install/files/05-imxc/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + 
"offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + 
"clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + 
"description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + 
"clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": 
"8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not 
specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + 
"authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + 
"consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + 
"offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": 
"security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": 
"false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + 
"id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": 
"6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + 
"name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": 
"true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + 
"user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; 
includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": 
"b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + 
"userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": 
"667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/auth-server.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/auth-server.yaml new file mode 100644 index 0000000..fb8fe7b --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/auth-server.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: auth-server + namespace: imxc +spec: + selector: + matchLabels: + app: auth + replicas: 1 + template: + metadata: + labels: + app: auth + spec: + initContainers: + - name: init-resource + image: {{ .Values.global.IMXC_IN_REGISTRY }}/init-resource:latest + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: ['chmod -R 777 /scripts; cp /scripts/init.json /tmp/init.json'] + volumeMounts: + - name: init + mountPath: /tmp + containers: + - name: auth-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/auth-server:{{ .Values.global.AUTH_SERVER_VERSION }} + 
imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-auth-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # imxc-api-server configuration + - name: IMXC_API-SERVER-URL + value: http://imxc-api-service:8080 + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_REPO + value: debug + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_AUTH_AUTHENTICATION_USER_SERVICE + value: debug + # 현대카드는 커스텀으로 해당 값 추가. keycloak만 사용(true), keycloak+내부db 사용(false) + - name: IMXC_KEYCLOAK_ENABLED + value: "true" + + volumeMounts: + - name: init + mountPath: /tmp + resources: + requests: + memory: "200Mi" + cpu: "10m" + + volumes: + - name: init + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: auth-server-service + namespace: imxc +spec: + type: ClusterIP + selector: + app: auth + ports: + - protocol: TCP + port: 8480 + # nodePort: 15016 diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-datagate.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-datagate.yaml new file mode 100644 index 0000000..cbbee9a --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-datagate.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + selector: + matchLabels: + app: datagate + replicas: 2 + template: + metadata: + labels: + app: datagate + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/datagate:{{ .Values.global.DATAGATE_VERSION }} + imagePullPolicy: IfNotPresent + name: datagate + ports: + - containerPort: 50051 + protocol: TCP + - 
containerPort: 14268 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! + - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: kafka-broker:9094 + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + cpu: "100m" + memory: "100Mi" + limits: + cpu: "2000m" + memory: "1Gi" +--- +apiVersion: v1 +kind: Service +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + ports: + - name: datagate-grpc + port: 50051 + protocol: TCP + targetPort: 50051 + nodePort: 30051 + - name: datagate-http + port: 14268 + targetPort: 14268 +# nodePort: 31268 + - name: datagate-readiness + port: 14269 + targetPort: 14269 + selector: + app: datagate + type: NodePort diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml new file mode 100644 index 0000000..45c3d41 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml @@ -0,0 +1,331 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + selector: + matchLabels: + app: metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: metric-agent + spec: + containers: + - name: metric-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-agent:{{ .Values.global.METRIC_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14271 + - containerPort: 14272 + args: + - --config.file=/etc/metric-agent/metric-agent.yml + env: + - name: STORAGE_TYPE + value: datagate + - name: DATAGATE + value: datagate:50051 + - name: CLUSTER_ID + value: cloudmoa +# - name: USER_ID +# value: mskim@ex-em.com + 
volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "1000Mi" + cpu: "300m" + volumes: + - name: config-volume + configMap: + name: metric-agent-config + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + ports: + - name: metric + port: 14271 + targetPort: 14271 + selector: + app: metric-agent + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: metric-agent-config + namespace: imxc +data: + metric-agent.yml: | + global: + scrape_interval: 10s + evaluation_interval: 5s # Evaluate rules every 15 seconds. The default is every 1 minute. + + scrape_configs: + - job_name: 'kubernetes-kubelet' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: 'cloudmoa' + - target_label: xm_entity_type + replacement: 'Node' + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + + - job_name: 'kubernetes-node-exporter' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: 
[__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: '(.*):10250' + replacement: '${1}:9100' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: 'kubernetes-(.*)' + replacement: '${1}' + target_label: name + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Node' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: 'kubernetes-cadvisor' + scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + 
kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Container' + +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + + {{- else }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +{{- end }} + # CLOUD-8671 | 데이터 필터링 설정 추가 + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop + + - job_name: 'kafka-consumer' + metrics_path: /remote_prom + scrape_interval: 5s + scrape_timeout: 5s + scheme: kafka + static_configs: + - targets: ['kafka-broker:9094'] + params: + #server_addrs: ['broker.default.svc.k8s:9094'] + server_addrs: ['kafka-broker:9094'] + encoding: [proto3] + contents: [remote_write] + compression: [snappy] + group: [remote-write-consumer] + workers: [50] + + # job for API server (SpringBoot) commented by ersione 2019-09-19 + - job_name: 'imxc-api' + metrics_path: '/actuator/prometheus' + scrape_interval: 5s + static_configs: + - targets: ['imxc-api-service:8080'] + - job_name: 'imxc-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] + #- job_name: 'imxc-auth' + # metrics_path: '/actuator/prometheus' + # scrape_interval: 15s + # static_configs: + # - targets: ['auth-server-service:8480'] + + + + - job_name: 'alertmanager-exporter' + metrics_path: '/metrics' + scrape_interval: 5s + static_configs: + - targets: ['alertmanager:9093'] + + + # modified by seungtak choi 2020-02-18 + - job_name: 'cmoa-collector' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + 
namespaces: + names: + - imxc + relabel_configs: + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: cmoa-collector + + # added by dwkim 2021-03-15 + - job_name: 'elasticsearch' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + relabel_configs: + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_pod_node_name] + target_label: xm_node_id + - source_labels: [__meta_kubernetes_namespace] + target_label: xm_namespace + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: es-exporter-elasticsearch-exporter + + # kafka-exporter prometheus 수집 룰 추가 + - job_name: 'kafka-exporter' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9308' + + # kafka-jmx-exporter configuration yaml 수집룰 추가 + - job_name: 'kafka-jmx' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9010' + + # job for API Server(Spring Cloud Notification Server) commented by hjyoon 2022-01-26 + - job_name: 'cmoa-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml new file mode 100644 index 0000000..3d7acc8 --- /dev/null +++ 
b/roles/cmoa_demo_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + selector: + matchLabels: + app: metric-collector + replicas: 3 + template: + metadata: + labels: + app: metric-collector + spec: + containers: + - name: metric-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-collector:{{ .Values.global.METRIC_COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14270 + env: + - name: KAFKA_CONSUMER_BROKERS + value: kafka-broker:9094 + - name: HTTP_PUSH + value: http://base-cortex-nginx/api/v1/push + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + ports: + - name: metric + port: 14270 + targetPort: 14270 + selector: + app: metric-collector diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml new file mode 100644 index 0000000..b20fed2 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-batch + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-batch +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-batch + template: + metadata: + labels: + app: cmoa-kube-info-batch + spec: + containers: + - name: cmoa-kube-info-batch + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-batch:{{ .Values.global.KUBE_INFO_BATCH_VERSION }} + imagePullPolicy: Always + env: + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ .Values.global.JDBC_DB }} + - name: JDBC_USER + 
value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: DELETE_HOUR + value: '{{ .Values.global.DELETE_HOUR }}' diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml new file mode 100644 index 0000000..cad91b9 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-connector + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-connector +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-connector + template: + metadata: + labels: + app: cmoa-kube-info-connector + spec: + containers: + - name: cmoa-kube-info-connector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-connector:{{ .Values.global.KUBE_INFO_CONNECTOR_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_GROUP_ID + value: cmoa-kube-info-connector + - name: KAFKA_SERVER + value: kafka:9092 + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ .Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: MAX_POLL_RECORDS_CONFIG + value: "300" + - name: MAX_POLL_INTERVAL_MS_CONFIG + value: "600000" + - name: SESSION_TIMEOUT_MS_CONFIG + value: "60000" + - name: MAX_PARTITION_FETCH_BYTES_CONFIG + value: "5242880" diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml 
b/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml new file mode 100644 index 0000000..6f77ee5 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-flat + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-flat +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-flat + template: + metadata: + labels: + app: cmoa-kube-info-flat + spec: + containers: + - name: cmoa-kube-info-flat + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-flat:{{ .Values.global.KUBE_INFO_FLAT_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_SERVER + value: kafka:9092 + - name: KAFKA_INPUT_TOPIC + value: {{ .Values.global.KAFKA_INPUT_TOPIC }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + resources: + limits: + memory: 1Gi + requests: + memory: 200Mi diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-manual.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git 
a/roles/cmoa_demo_install/files/05-imxc/templates/eureka-server.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/eureka-server.yaml new file mode 100644 index 0000000..5ffd9c2 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/eureka-server.yaml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Service +metadata: + name: eureka + namespace: imxc + labels: + app: eureka +spec: + type: NodePort + ports: + - port: 8761 + targetPort: 8761 + nodePort: 30030 + name: eureka + selector: + app: eureka +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: eureka + namespace: imxc +spec: + serviceName: 'eureka' + replicas: 3 + selector: + matchLabels: + app: eureka + template: + metadata: + labels: + app: eureka + spec: + containers: + - name: eureka + image: {{ .Values.global.IMXC_IN_REGISTRY }}/eureka-server:{{ .Values.global.EUREKA_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8761 + #resources: + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "1200Mi" + # cpu: "500m" + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka-0.eureka:8761/eureka/,http://eureka-1.eureka:8761/eureka/,http://eureka-2.eureka:8761/eureka/ + - name: JVM_OPTS + value: "-Xms1g -Xmx1g" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "20m" diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/imxc-api-server.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/imxc-api-server.yaml new file mode 100644 index 0000000..de967a6 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/imxc-api-server.yaml @@ -0,0 +1,245 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-api-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-api + ports: + - protocol: TCP + name: api + port: 8080 + targetPort: 8080 + nodePort: 32080 + - protocol: TCP + name: 
netty + port: 10100 + targetPort: 10100 + nodePort: 31100 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-api + namespace: imxc + labels: + app: imxc-api +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-api + template: + metadata: + labels: + app: imxc-api + build: develop + spec: + securityContext: + #runAsNonRoot: true + runAsUser: 1577 + initContainers: + - name: cloudmoa-api-permission-fix + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 0 +# - sh +# - -c +# - "chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log" + volumeMounts: + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + containers: + - name: imxc-api + image: {{ .Values.global.IMXC_IN_REGISTRY }}/api-server:{{ .Values.global.API_SERVER_VERSION }} + resources: + requests: + cpu: 200m + memory: 500Mi + limits: + cpu: 2000m + memory: 5000Mi + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-api-server.sh" | quote }}] + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + - name: SPRING_DATAGATE_URLS + value: "{{ .Values.global.DATAGATE_INSIDE_IP }}" + - name: SPRING_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_INSIDE_PORT }}" + - name: SPRING_REDIS_URLS + value: {{ .Values.global.REDIS_URLS }} + - name: SPRING_REDIS_PORT + value: "{{ .Values.global.REDIS_PORT }}" + - name: SPRING_REDIS_PASSWORD + value: {{ .Values.global.REDIS_PASSWORD }} + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + - name: SPRING_BOOT_ADMIN_CLIENT_URL + value: http://{{ .Values.global.IMXC_ADMIN_SERVER_DNS }}:8888 + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_NAME + value: Intermax Cloud API 
Server + - name: SPRING_BOOT_ADMIN_CLIENT_ENABLED + value: "false" + - name: OPENTRACING_JAEGER_ENABLED + value: "false" + - name: SPRING_JPA_PROPERTIES_HIBERNATE_GENERATE_STATISTICS + value: "false" + - name: IMXC_REPORT_ENABLED + value: "true" + - name: IMXC_ALERT_PERSIST + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_ENVIRONMENT + value: Demo + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_PREFERIP + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_NODENAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_PODNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: SPRING_BOOT_ADMIN_CLIENT_AUTODEREGISTRATION + value: "true" + - name: SPRING_JPA_HIBERNATE_DDL-AUTO + value: validate + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + - name: KEYCLOAK_RESOURCE + value: "{{ .Values.global.KEYCLOAK_RESOURCE }}" + - name: SPRING_KEYCLOAK_MASTER_USERNAME + value: "{{ .Values.global.KEYCLOAK_MASTER_USERNAME }}" + - name: SPRING_KEYCLOAK_MASTER_PASSWORD + value: "{{ .Values.global.KEYCLOAK_MASTER_PASSWORD }}" + - name: SPRING_LDAP_USE + value: "{{ .Values.global.IMXC_LDAP_USE }}" + - name: TIMEZONE + value: Asia/Seoul + - name: IMXC_PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: IMXC_PROMETHEUS_NAMESPACE + value: "imxc" + - name: LOGGING_LEVEL_ROOT + value: info + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + #R30020210730 추가 :: 현대카드는 true로 
설정 + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-HOST + value: "exemmail1.ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PORT + value: "587" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-USERNAME + value: "imxc@ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PASSWORD + value: "1234" + - name: IMXC_ALERT_NOTIFICATION_MAIL_PROTOCOL + value: "smtp" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-REQ + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-ENB + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_SMTP-AUTH + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_DEBUG + value: "true" + - name: IMXC_ANOMALY_BLACK-LIST + value: "false" + - name: IMXC_VERSION_SAAS + value: "false" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_API_SERVER_KUBERNETES_SERVICE + value: info + - name: IMXC_WEBSOCKET_SCHEDULE_PERIOD_5SECOND + value: "30000" + - name: IMXC_CACHE_INFO_1MCACHE + value: "0 0/1 * * * ?" + - name: IMXC_EXECUTION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_PERMISSION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_CODE-LOG_USE + value: "false" + - name: IMXC_PORTAL_INFO_URL + value: "{{ .Values.global.IMXC_PORTAL_INFO_URL }}" + # Do not remove below rows related to AGENT-INSTALL. Added by youngmin 2021-03-29. 
+ - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_IP + value: {{ .Values.global.KAFKA_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_INTERFACE-PORT + value: "{{ .Values.global.KAFKA_INTERFACE_PORT }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_IP + value: {{ .Values.global.IMXC_API_SERVER_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_NETTY-PORT + value: "{{ .Values.global.APISERVER_NETTY_PORT }}" + - name: AGENT-INSTALL_REGISTRY_URL + value: {{ .Values.global.IMXC_IN_REGISTRY }} + - name: AGENT-INSTALL_IMAGE_TAG + value: {{ .Values.global.AGENT_IMAGE_TAG }} + - name: AGENT-INSTALL_JAEGER_AGENT_CLUSTERIP + value: {{ .Values.global.JAEGER_AGENT_CLUSTERIP }} + - name: AGENT-INSTALL_JAEGER_JAVA-SPECIALAGENT-CLASSPATH + value: {{ .Values.global.JAEGER_JAVA_SPECIALAGENT_CLASSPATH }} + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_IP + value: "{{ .Values.global.DATAGATE_OUTSIDE_IP }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_OUTSIDE_PORT }}" + - name: IMXC_REST-CONFIG_MAX-CON + value: "200" + - name: IMXC_REST-CONFIG_MAX-CON-ROUTE + value: "65" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + # Elasticsearch for Security + - name: SPRING_ELASTIC_SSL_USERNAME + value: "{{ .Values.global.CMOA_ES_ID }}" + - name: SPRING_ELASTIC_SSL_PASSWORD + value: "{{ .Values.global.CMOA_ES_PW }}" + - name: IMXC_BACK-LOGIN_ENABLED + value: "{{ .Values.global.BACKLOGIN }}" + volumeMounts: + - mountPath: /var/log/imxc-audit.log + name: auditlog + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + volumes: + - name: auditlog + hostPath: + path: {{ .Values.global.AUDITLOG_PATH }}/imxc-audit.log + type: FileOrCreate + - name: notification-upper-directory + hostPath: + path: /home/ + type: DirectoryOrCreate + - name: notification-directory + hostPath: + path: 
/home/cloudmoa_event.log + type: FileOrCreate diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/imxc-collector.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/imxc-collector.yaml new file mode 100644 index 0000000..e125243 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/imxc-collector.yaml @@ -0,0 +1,79 @@ +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-collector + template: + metadata: + labels: + app: cmoa-collector + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: cmoa-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cmoa-collector:{{ .Values.global.COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 500m + memory: 2500Mi + ports: + - containerPort: 12010 + env: + - name: LOCATION + value: Asia/Seoul + - name: KAFKA_SERVER + value: kafka:9092 + - name: ELASTICSEARCH + value: elasticsearch:9200 +# - name: PROMETHEUS +# value: nginx-cortex/prometheus + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! 
+ - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: CMOA_ES_ID + value: {{ .Values.global.CMOA_ES_ID }} + - name: CMOA_ES_PW + value: {{ .Values.global.CMOA_ES_PW }} + resources: + requests: + cpu: "300m" + memory: "1500Mi" + limits: + cpu: "500m" + memory: "2500Mi" +- apiVersion: v1 + kind: Service + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + ports: + - name: cmoa-collector-exporter + port: 12010 + targetPort: 12010 + selector: + app: cmoa-collector + diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/noti-server.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/noti-server.yaml new file mode 100644 index 0000000..99c7a5b --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/noti-server.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: noti-server + namespace: imxc +spec: + selector: + matchLabels: + app: noti + replicas: 1 + template: + metadata: + labels: + app: noti + spec: + containers: + - name: noti-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/notification-server:{{ .Values.global.NOTI_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-noti-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: {{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }} + - name: KEYCLOAK_REALM + value: exem + + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + + # postgres configuration + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + + # redis configuration + - name: SPRING_REDIS_HOST + value: redis-master + - name: SPRING_REDIS_PORT + value: "6379" + - name: SPRING_REDIS_PASSWORD + value: dkagh1234! 
+ + # elasticsearch configuration + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + + # file I/O configuration + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + + # rabbitmq configuration + - name: IMXC_RABBITMQ_HOST + value: base-rabbitmq + - name: IMXC_RABBITMQ_PORT + value: "61613" + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: IMXC_RABBITMQ_SYSTEM_ID + value: "user" + - name: IMXC_RABBITMQ_SYSTEM_PASSWORD + value: "eorbahrhkswp" + + # api-server configuration + - name: IMXC_API-SERVER-URL + value: "http://imxc-api-service:8080" + + # cortex integration + - name: SPRING_CORTEX_URLS + value: base-cortex-configs + - name: SPRING_CORTEX_PORT + value: "8080" + + # alert webhook + - name: IMXC_ALERT_WEBHOOK_URLS + value: http://noti-server-service:8080/alert + + # etc configuration + - name: IMXC_PROMETHEUS_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + - name: IMXC_ALERT_KUBERNETES_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: noti-server-service + namespace: imxc +spec: + type: NodePort + selector: + app: noti + ports: + - protocol: TCP + port: 8080 + nodePort: 31083 diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/streams-depl.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/streams-depl.yaml new file mode 100644 index 0000000..b3223e5 --- /dev/null +++ 
b/roles/cmoa_demo_install/files/05-imxc/templates/streams-depl.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-stream-txntrend-deployment + namespace: imxc + labels: + app: kafka-stream-txntrend +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-stream-txntrend + template: + metadata: + labels: + app: kafka-stream-txntrend + spec: + containers: + - name: kafka-stream-txntrend + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-stream-txntrend:{{ .Values.global.KAFKA_STREAM_VERSION }} + imagePullPolicy: IfNotPresent + env: + - name: SERVICE_KAFKA_HOST + value: kafka-broker:9094 + - name: SERVICE_STREAM_OUTPUT + value: jspd_txntrend diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/topology-agent.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/topology-agent.yaml new file mode 100644 index 0000000..80476a3 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/topology-agent.yaml @@ -0,0 +1,107 @@ +{{ if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{ else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{ end }} +kind: ClusterRoleBinding +metadata: + name: topology-agent + namespace: imxc + labels: + k8s-app: topology-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: topology-agent + namespace: imxc +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: topology-agent + namespace: imxc + labels: + app: topology-agent +spec: + selector: + matchLabels: + app: topology-agent + template: + metadata: + labels: + app: topology-agent + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + # below appended + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: topology-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/topology-agent:{{ 
.Values.global.TOPOLOGY_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + env: + - name: CLUSTER_ID + value: cloudmoa + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DATAGATE + value: datagate:50051 + - name: LOG_RNAME_USE + value: "false" + - name: LOG_LEVEL + value: "DEBUG" + - name: CLOUDMOA_SETTING_PATH + value: /home/cloudmoa/setting/ + resources: + requests: + memory: "125Mi" + cpu: "100m" + limits: + memory: "600Mi" + cpu: "500m" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / diff --git a/roles/cmoa_demo_install/files/05-imxc/templates/zuul-server.yaml b/roles/cmoa_demo_install/files/05-imxc/templates/zuul-server.yaml new file mode 100644 index 0000000..79969d7 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/templates/zuul-server.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zuul-deployment + namespace: imxc + labels: + app: cloud +spec: + selector: + matchLabels: + app: cloud + replicas: 1 + template: + metadata: + labels: + app: cloud + spec: + containers: + - env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ZIPKIN_BASE-URL + value: http://zipkin-service:9411 + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_ZUULSERVER_FILTERS_AUTHFILTER + value: info + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + name: zuul + image: {{ 
.Values.global.IMXC_IN_REGISTRY }}/zuul-server:{{ .Values.global.ZUUL_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 + #- containerPort: 6831 + #protocol: UDP + #resources: + # requests: + # memory: "256Mi" + # cpu: "344m" + # limits: + # memory: "1Gi" + # cpu: "700m" + resources: + requests: + memory: "200Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: zuul + namespace: imxc + labels: + app: cloud +spec: + type: NodePort + selector: + app: cloud + ports: + - port: 8080 + targetPort: 8080 + nodePort: 31081 diff --git a/roles/cmoa_demo_install/files/05-imxc/values.yaml b/roles/cmoa_demo_install/files/05-imxc/values.yaml new file mode 100644 index 0000000..07c9a47 --- /dev/null +++ b/roles/cmoa_demo_install/files/05-imxc/values.yaml @@ -0,0 +1,157 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + IMXC_LDAP_USE: false + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AUDITLOG_PATH: /var/log + KAFKA_IP: kafka-broker + # 로드밸런서 안 쓴다고 가정했을때 입니다.. + KAFKA_INTERFACE_PORT: 9094 + APISERVER_NETTY_PORT: 10100 + #REGISTRY_URL: cdm-dev.exem-oss.org:5050 + #REGISTRY_URL: 10.10.31.243:5000/cmoa + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AGENT_IMAGE_TAG: rel0.0.0 + # Jaeger 관련변수 + JAEGER_AGENT_CLUSTERIP: 10.98.94.198 + JAEGER_JAVA_SPECIALAGENT_CLASSPATH: classpath:/install/opentracing-specialagent-1.7.4.jar + # added by DongWoo Kim 2021-06-21 + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_MASTER_USERNAME: admin + KEYCLOAK_MASTER_PASSWORD: admin + IMXC_PORTAL_INFO_URL: + KEYCLOAK_REALM: exem + # added by EunHye Kim 2021-08-25 + #DATAGATE_URLS: datagate + #DATAGATE_IP: 111.111.111.111 + #DATAGATE_PORT: 14268 + DATAGATE_INSIDE_IP: datagate + DATAGATE_INSIDE_PORT: 14268 + DATAGATE_OUTSIDE_IP: 111.111.111.111 + DATAGATE_OUTSIDE_PORT: 30051 + REDIS_URLS: redis-master + REDIS_PORT: 6379 + REDIS_PASSWORD: dkagh1234! 
+ # added by DongWoo Kim 2021-08-31 (version of each module) + DATAGATE_VERSION: rel0.0.0 + #ADMIN_SERVER_VERSION: v1.0.0 + #API_SERVER_VERSION: CLOUD-172 + API_SERVER_VERSION: rel0.0.0 + COLLECTOR_VERSION: rel0.0.0 + #release-3.3.0 + TOPOLOGY_AGENT_VERSION: rel0.0.0 + METRIC_COLLECTOR_VERSION: rel0.0.0 + #v1.0.0 + METRIC_AGENT_VERSION: rel0.0.0 + # spring cloud + ZUUL_SERVER_VERSION: rel0.0.0 + #CMOA-1269 + EUREKA_SERVER_VERSION: rel0.0.0 + AUTH_SERVER_VERSION: rel0.0.0 + NOTI_SERVER_VERSION: rel0.0.0 + KAFKA_STREAM_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 + KUBE_INFO_FLAT_VERSION: rel0.0.0 + KUBE_INFO_BATCH_VERSION: rel0.0.0 + KUBE_INFO_CONNECTOR_VERSION: rel0.0.0 + + + CMOA_MANUAL_PORT: 31090 + + + # Keycloak + #KEYCLOAK_VERSION: v1.0.0 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + #IMXC_REGISTRY: 10.10.31.243:5000 + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + + # namespace 추가 + IMXC_NAMESPACE: imxc + + # ZUUL 8080으로 열어놓을것 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + JDBC_KIND: 'postgres' + JDBC_SERVER: 'postgres:5432' + JDBC_DB: 'postgresdb' + JDBC_USER: 'admin' + JDBC_PWD: 'eorbahrhkswp' + + KAFKA_INPUT_TOPIC: 'kubernetes_info' + + TABLE_PREFIX: 'cmoa_' + BLACK_LIST: 'configmap_base,cronjob_active,endpoint_base,endpoint_addresses,endpoint_notreadyaddresses,endpoint_ports,event_base,node_image,persistentvolume_base,persistentvolumeclaim_base,pod_volume,resourcequota_base,resourcequota_scopeselector' + DELETE_HOUR: '15' + BACKLOGIN: false diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml 
b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! /bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist 
exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + 
"bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + 
"id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, 
+ { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": 
"view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not 
specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], 
+ "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + 
"alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + 
"fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": 
"S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = 
Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": 
"openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": 
"oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": 
"String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": 
"email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + 
"attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + 
"oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } 
+ } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + 
"autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is 
existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": 
false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": 
"Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + 
"userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile 
config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + "config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml new file mode 100644 index 0000000..22f1055 --- /dev/null +++ 
b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config-jaeger + namespace: imxc +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + // Env Settings servletURL + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + demoServletURL: "{{ .Values.global.DEMO_SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "https://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + manualURL: "https://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings interMaxURL + interMaxURL: "http://39.115.183.186:8085/intermax?", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_UI_VERSION }}', + UI_build_ver: '{{ .Values.global.UI_SERVER_VERSION }}', + maxSelectionSize: 30, + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + healthIndicatorStateInfo: [ + { + state: "critical", + // max: 1.0, + // over: 0.8, + max: 100, + over: 80, + text: "Critical", + color: "#ff4040", + level: 4, + }, { + state: "warning", + // max: 0.8, + // over: 0.5, + max: 80, + over: 50, + text: "Warning", + color: "#ffa733", + level: 3, + }, { + state: "attention", + // max: 0.5, + // over: 0.0, + max: 50, + over: 0, + text: 
"Attention", + // color: "#B4B83D", + color: "#1cbe85", + level: 2, + }, { + state: "normal", + max: 0, + over: 0, + text: "Normal", + // color: "#64B87D", + color: "#24b0ed", + level: 1, + }, + ] + }; + + diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml new file mode 100644 index 0000000..a0d959f --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service-jaeger + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui-jaeger + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31084 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui-jaeger + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui-jaeger + template: + metadata: + labels: + app: imxc-ui-jaeger + spec: + containers: + - name: imxc-ui-jaeger + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config-jaeger + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config-jaeger diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml new file mode 100644 index 0000000..19a8041 --- /dev/null +++ 
b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : https + DEMO_SERVELET_URL_PROTOCOL : https + KEYCLOAK_AUTH_SERVER_URL: https://kc.exem-oss.org/auth/ + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: ag.exem-oss.org + ZUUL_SERVER_PORT: 443 + + NOTI_SERVER_IP: noti.exem-oss.org + NOTI_SERVER_PORT: 443 + + CMOA_MANUAL_SERVER_IP: manual.exem-oss.org + CMOA_MANUAL_PORT: 443 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel0.0.0 + UI_SERVER_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! /bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem 
relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, 
+ "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, 
+ { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": 
"view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not 
specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], 
+ "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + 
"alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + 
"fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": 
"S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = 
Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": 
"openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": 
"oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": 
"String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": 
"email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + 
"attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + 
"oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } 
+ } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + 
"autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is 
existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": 
false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": 
"Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + 
"userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile 
config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + "config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml new file mode 100644 index 0000000..a445b83 --- /dev/null +++ 
b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config + namespace: imxc + +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + // Env Settings servletURL + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + // Env Settings interMaxURL + // ex) ~/intermax/?paConnect=1&paType=ResponseInspector&fromTime=1556096539206&toTime=1556096599206&serverName=jeus89 + interMaxURL: "http://39.115.183.186:8085/intermax?", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_VERSION }}', + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + // refreshTime: '4', // 리로드 주기 설정 4로 설정시 새벽 4시에 리로드 하게 됨 + intervalTime: { // 5의 배수여야만 함 + short: 5, + medium: 10, + long: 60, + }, + // excludedContents: { + // anomalyScoreSettings: true, // entity black list setting page + // anomalyScoreInSidebar: true, // anomaly score in side bar + // }, + serviceTraceAgentType: 'jspd' + }; diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml 
b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml new file mode 100644 index 0000000..35c4b61 --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui + template: + metadata: + labels: + app: imxc-ui + spec: + containers: + - name: imxc-ui + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config diff --git a/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml new file mode 100644 index 0000000..19a8041 --- /dev/null +++ b/roles/cmoa_demo_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : https + DEMO_SERVELET_URL_PROTOCOL : https + KEYCLOAK_AUTH_SERVER_URL: https://kc.exem-oss.org/auth/ + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: ag.exem-oss.org + ZUUL_SERVER_PORT: 443 + + NOTI_SERVER_IP: noti.exem-oss.org + NOTI_SERVER_PORT: 443 + + CMOA_MANUAL_SERVER_IP: manual.exem-oss.org + CMOA_MANUAL_PORT: 443 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel0.0.0 + UI_SERVER_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 diff --git a/roles/cmoa_demo_install/files/ip_change b/roles/cmoa_demo_install/files/ip_change new file mode 100755 index 0000000..ac13cc7 --- /dev/null +++ b/roles/cmoa_demo_install/files/ip_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_ip=$1 +after_ip=$2 +grep_path=$3 + +if [[ $before_ip == '' || $after_ip == '' ]]; then + echo '[Usage] $0 {before_ip} {after_ip}' + exit +fi + +grep -rn ${before_ip} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_ip}/${after_ip}/g" + +echo "success" \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/k8s_status b/roles/cmoa_demo_install/files/k8s_status new file mode 100755 index 0000000..16b3c61 --- /dev/null +++ b/roles/cmoa_demo_install/files/k8s_status @@ -0,0 +1,86 @@ +#! 
/usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, subprocess, io, time +from kubernetes import client, config +def debug_print(msg): + print(" # ", msg) + +def k8s_conn(KUBE_CONFIG_PATH): + config.load_kube_config( + config_file=KUBE_CONFIG_PATH + ) + k8s_api = client.CoreV1Api() + + return k8s_api + +def k8s_get_pod(k8s_api, namespace, target=''): + pretty=False + watch=False + timeout_seconds=30 + api_response = k8s_api.list_namespaced_pod(namespace, pretty=pretty, timeout_seconds=timeout_seconds, watch=watch) + pod_list=[] + for pod in api_response.items: + status = pod.status.phase + #container_status = pod.status.container_statuses[0] + #if container_status.started is False or container_status.ready is False: + # waiting_state = container_status.state.waiting + # if waiting_state.message is not None and 'Error' in waiting_state.message: + # status = waiting_state.reason + if target != '': + if target in pod.metadata.name: + return (pod.metadata.name + " " + status) + pod_list.append(pod.metadata.name+" "+status) + return pod_list + +def k8s_pod_status_check(k8s_api, waiting_time, namespace,except_pod=False): + num=0 + while True: + num+=1 + resp=k8s_get_pod(k8s_api, namespace) + all_run_flag=True + if debug_mode: + debug_print('-'*30) + debug_print('pod 상태 체크시도 : {} ({}s)'.format(num, waiting_time)) + debug_print('-'*30) + for i in resp: + if except_pod: + if except_pod in i.lower(): continue + if 'pending' in i.lower(): + all_run_flag=False + result='{} 결과: {}'.format(i, all_run_flag) + debug_print(result) + if all_run_flag: + if debug_mode: + debug_print('-'*30) + debug_print('[{}] pod All Running'.format(namespace)) + debug_print('-'*30) + for i in resp: debug_print(i) + break + else: time.sleep(int(waiting_time)) + +def main(): + namespace = os.sys.argv[1] + + try: + Except_k8s_pod = os.sys.argv[2] + except: + Except_k8s_pod = '' + + try: + KUBE_CONFIG_PATH = os.sys.argv[3] + os.environ["KUBECONFIG"]=KUBE_CONFIG_PATH + except: + KUBE_CONFIG_PATH = 
os.environ["KUBECONFIG"] + + k8s_api=k8s_conn(KUBE_CONFIG_PATH) + k8s_pod_status_check(k8s_api, 60, namespace, Except_k8s_pod) + + +if __name__ == "__main__": + try: + debug_mode=False + main() + except Exception as err: + print("[Usage] k8s_status {namespace} {Except_pod=(default=false)} {KUBECONFIG_PATH=(default=current env)}") + print(err) diff --git a/roles/cmoa_demo_install/files/postgres_check_data b/roles/cmoa_demo_install/files/postgres_check_data new file mode 100755 index 0000000..d377aeb --- /dev/null +++ b/roles/cmoa_demo_install/files/postgres_check_data @@ -0,0 +1,6 @@ +#!/bin/bash + +namespace=$1 +pg_pod=`kubectl -n ${namespace} get pod --no-headers | awk '{print $1}' | grep postgres` +kubectl_cmd="kubectl -n ${namespace} exec -it ${pg_pod} --" +${kubectl_cmd} bash -c "echo \"select count(*) from pg_database where datname='keycloak';\" | /usr/bin/psql -U postgres | egrep -iv '(count|---|row)' | tr -d ' ' | tr -d '\n'" \ No newline at end of file diff --git a/roles/cmoa_demo_install/files/rel_change b/roles/cmoa_demo_install/files/rel_change new file mode 100755 index 0000000..ae1f6b3 --- /dev/null +++ b/roles/cmoa_demo_install/files/rel_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_version=$1 +after_version=$2 +grep_path=$3 + +if [[ $before_version == '' || $after_version == '' ]]; then + echo '[Usage] $0 {before_version} {after_version}' + exit +fi + +grep -rn ${before_version} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_version}/${after_version}/g" + +echo "success" \ No newline at end of file diff --git a/roles/cmoa_demo_install/tasks/00-default-settings-master.yml b/roles/cmoa_demo_install/tasks/00-default-settings-master.yml new file mode 100644 index 0000000..4a17c4a --- /dev/null +++ b/roles/cmoa_demo_install/tasks/00-default-settings-master.yml @@ -0,0 +1,30 @@ +--- +- name: 1. 
Create a cmoa namespace + kubernetes.core.k8s: + name: "{{ cmoa_namespace }}" + api_version: v1 + kind: Namespace + state: present + +- name: 2. Create secret + kubernetes.core.k8s: + state: present + namespace: "{{ item }}" + src: "{{ role_path }}/files/00-default/secret_nexus.yaml" + apply: yes + with_items: + - "{{ cmoa_namespace }}" + - default + +- name: 3. kubeconfig check + shell: "echo $KUBECONFIG" + register: kubeconfig + +- name: 4. Patch default sa + shell: "{{ role_path }}/files/00-default/sa_patch.sh {{ kubeconfig.stdout }}" + +- name: 5. Master IP Setting + command: "{{ role_path }}/files/ip_change {{ before_ip }} {{ ansible_default_ipv4.address }} {{ role_path }}/files" + +- name: 6. CloudMOA Version Change + command: "{{ role_path }}/files/rel_change {{ before_version }} {{ cmoa_version }} {{ role_path }}/files" diff --git a/roles/cmoa_demo_install/tasks/00-default-settings-node.yml b/roles/cmoa_demo_install/tasks/00-default-settings-node.yml new file mode 100644 index 0000000..a568b74 --- /dev/null +++ b/roles/cmoa_demo_install/tasks/00-default-settings-node.yml @@ -0,0 +1,27 @@ +--- +- name: 1. Node add Label (worker1) + kubernetes.core.k8s: + apply: yes + definition: + apiversion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker1 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker1 + +- name: 2. Node add Label (worker2) + kubernetes.core.k8s: + definition: + apiversion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker2 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker2 \ No newline at end of file diff --git a/roles/cmoa_demo_install/tasks/01-storage-install.yml b/roles/cmoa_demo_install/tasks/01-storage-install.yml new file mode 100644 index 0000000..bef58ef --- /dev/null +++ b/roles/cmoa_demo_install/tasks/01-storage-install.yml @@ -0,0 +1,45 @@ +--- +- name: 1. 
yaml file install (sc, pv) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/01-storage/{{ item }}" + apply: yes + with_items: + - 00-storageclass.yaml + - 01-persistentvolume.yaml + +- name: 2. helmchart install (minio) + kubernetes.core.helm: + name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/01-storage/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/01-storage/{{item}}/values.yaml" + with_items: + - minio + +- name: 3. Change a Minio Api Service (NodePort=minio_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ minio_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ minio_service_port }}" + nodePort: "{{ minio_nodePort }}" + apply: yes + +- name: 4. Check Kubernetes Pods (minio) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 5. minio setting (minio) + command: "{{ role_path }}/files/01-storage/cmoa_minio {{ ansible_default_ipv4.address }}:{{ minio_nodePort }} {{ minio_user }} {{ bucket_name }} {{ days }} {{ rule_id }}" \ No newline at end of file diff --git a/roles/cmoa_demo_install/tasks/02-base-install.yml b/roles/cmoa_demo_install/tasks/02-base-install.yml new file mode 100644 index 0000000..f7924a6 --- /dev/null +++ b/roles/cmoa_demo_install/tasks/02-base-install.yml @@ -0,0 +1,51 @@ +--- +- name: 1. kafka broker config apply (base) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 00-kafka-broker-config.yaml + +- name: 2. coredns config apply (base) + kubernetes.core.k8s: + state: present + namespace: default + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 01-coredns.yaml + +- name: 3. 
helmchart install (base) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/02-base/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/02-base/{{item}}/values.yaml" + with_items: + - base + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 5. Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/roles/cmoa_demo_install/tasks/03-ddl-dml.yml b/roles/cmoa_demo_install/tasks/03-ddl-dml.yml new file mode 100644 index 0000000..9c44f8e --- /dev/null +++ b/roles/cmoa_demo_install/tasks/03-ddl-dml.yml @@ -0,0 +1,64 @@ +- name: 1. Check Postgres DB Data + command: "{{ role_path }}/files/postgres_check_data {{ cmoa_namespace }}" + register: pg_check_result + +- name: 2. Insert Elasticsearch template + command: "sh {{ role_path }}/files/03-ddl-dml/elasticsearch/es-ddl-put.sh {{ cmoa_namespace }}" +# when: pg_check_result.stdout != '1' +# register: es + +#- debug: +# msg: "{{es.stdout_lines}}" + +- name: 2.1. Elasticsearch dependency deploy restart + command: "kubectl -n {{ cmoa_namespace }} rollout restart deploy alertmanager base-cortex-configs base-cortex-distributor base-cortex-ruler" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + +- name: 2.2. Check Kubernetes Pods (Elasticsearch dependency) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 3. 
Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" + register: pod_list + when: pg_check_result.stdout != '1' + +- name: 4. Copy psql file in postgres (DDL) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_ddl.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_ddl.psql" + when: item is match('postgres') and pg_check_result.stdout != '1' + with_items: "{{ pod_list.stdout_lines }}" + ignore_errors: true + +- name: 5. Execute a command in postgres (DDL) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_ddl.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 6. Copy psql file in postgres (DML) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_dml.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 7. Execute a command in postgres (DML) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true \ No newline at end of file diff --git a/roles/cmoa_demo_install/tasks/04-keycloak-install.yml b/roles/cmoa_demo_install/tasks/04-keycloak-install.yml new file mode 100644 index 0000000..de5fc9c --- /dev/null +++ b/roles/cmoa_demo_install/tasks/04-keycloak-install.yml @@ -0,0 +1,34 @@ +--- +- name: 1. 
helmchart install (keycloak) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/04-keycloak" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/04-keycloak/values.yaml" + with_items: + - keycloak + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + + +- name: 5. Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/roles/cmoa_demo_install/tasks/05-imxc-install.yml b/roles/cmoa_demo_install/tasks/05-imxc-install.yml new file mode 100644 index 0000000..420d2d1 --- /dev/null +++ b/roles/cmoa_demo_install/tasks/05-imxc-install.yml @@ -0,0 +1,16 @@ +--- +- name: 1. helmchart install (imxc) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/05-imxc" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/05-imxc/values.yaml" + with_items: + - imxc + +- name: 2. Check Kubernetes Pods (imxc / keycloak) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/roles/cmoa_demo_install/tasks/06-imxc-ui-install.yml b/roles/cmoa_demo_install/tasks/06-imxc-ui-install.yml new file mode 100644 index 0000000..7da82a1 --- /dev/null +++ b/roles/cmoa_demo_install/tasks/06-imxc-ui-install.yml @@ -0,0 +1,112 @@ +--- +- name: 1. 
helmchart install (imxc-ui-all) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + - imxc-ui-jspd + when: imxc_ui == 'all' + +- name: 1. helmchart install (imxc-ui-jaeger) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + when: imxc_ui == 'jaeger' + +- name: 2. Change a imxc-ui Service (imxc-ui-jaeger) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ jaeger_servicename }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ jaeger_service_port }}" + nodePort: "{{ jaeger_nodePort }}" + apply: yes + when: imxc_ui == 'jaeger' + +- name: 2. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" # Output is a column + register: pod_list + when: imxc_ui != 'all' + +- name: 3. Copy psql file in psql (imxc-jaeger) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jaeger_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 4. 
Execute a command in psql (imxc-jaeger) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 1. helmchart install (imxc-ui-jspd) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jspd + when: imxc_ui == 'jspd' + ignore_errors: true + +- name: 3. Copy psql file in postgres (imxc-ui-jspd) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jspd_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 4. Execute a command in postgres (imxc-ui-jspd) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 2. Check Kubernetes Pods (imxc ui) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/roles/cmoa_demo_install/tasks/07-keycloak-setting.yml b/roles/cmoa_demo_install/tasks/07-keycloak-setting.yml new file mode 100644 index 0000000..f800f87 --- /dev/null +++ b/roles/cmoa_demo_install/tasks/07-keycloak-setting.yml @@ -0,0 +1,76 @@ +--- +- name: 0. 
Generate keycloak auth token + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/realms/master/protocol/openid-connect/token" + method: POST + body: "client_id={{ keycloak_auth_client }}&username={{ keycloak_admin_user }}&password={{ keycloak_admin_password }}&grant_type=password" + validate_certs: no + #no_log: "{{ keycloak_no_log | default('True') }}" + register: keycloak_auth_response + until: keycloak_auth_response.status == 200 + retries: 5 + delay: 2 + +- name: 1. Determine if realm exists + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/admin/realms/{{ keycloak_realm }}" + method: GET + status_code: + - 200 + - 404 + headers: + Accept: "application/json" + Authorization: "Bearer {{ keycloak_auth_response.json.access_token }}" + register: keycloak_realm_exists + +- name: 2. Validate Keycloak clients + ansible.builtin.assert: + that: + - item.name is defined and item.name | length > 0 + - (item.client_id is defined and item.client_id | length > 0) or (item.id is defined and item.id | length > 0) + fail_msg: "For each keycloak client, attributes `name` and either `id` or `client_id` is required" + quiet: True + loop: "{{ keycloak_clients | flatten }}" + loop_control: + label: "{{ item.name | default('unnamed client') }}" + +- name: 3. 
update a Keycloak client + community.general.keycloak_client: + auth_client_id: "{{ keycloak_auth_client }}" + auth_keycloak_url: "{{ keycloak_url }}{{ keycloak_context }}" + auth_realm: "{{ keycloak_auth_realm }}" + auth_username: "{{ keycloak_admin_user }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ item.realm }}" + default_roles: "{{ item.roles | default(omit) }}" + client_id: "{{ item.client_id | default(omit) }}" + id: "{{ item.id | default(omit) }}" + name: "{{ item.name | default(omit) }}" + description: "{{ item.description | default(omit) }}" + root_url: "{{ item.root_url | default('') }}" + admin_url: "{{ item.admin_url | default('') }}" + base_url: "{{ item.base_url | default('') }}" + enabled: "{{ item.enabled | default(True) }}" + redirect_uris: "{{ item.redirect_uris | default(omit) }}" + web_origins: "{{ item.web_origins | default('+') }}" + bearer_only: "{{ item.bearer_only | default(omit) }}" + standard_flow_enabled: "{{ item.standard_flow_enabled | default(omit) }}" + implicit_flow_enabled: "{{ item.implicit_flow_enabled | default(omit) }}" + direct_access_grants_enabled: "{{ item.direct_access_grants_enabled | default(omit) }}" + service_accounts_enabled: "{{ item.service_accounts_enabled | default(omit) }}" + public_client: "{{ item.public_client | default(False) }}" + protocol: "{{ item.protocol | default(omit) }}" + state: present + #no_log: "{{ keycloak_no_log | default('True') }}" + register: create_client_result + loop: "{{ keycloak_clients | flatten }}" + when: (item.name is defined and item.client_id is defined) or (item.name is defined and item.id is defined) + +- name: 4. 
Dependency deploy restart + command: "kubectl -n {{ cmoa_namespace }} rollout restart deploy imxc-api noti-server auth-server zuul-deployment" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + + diff --git a/roles/cmoa_demo_install/tasks/08-finish.yml b/roles/cmoa_demo_install/tasks/08-finish.yml new file mode 100644 index 0000000..f06cc24 --- /dev/null +++ b/roles/cmoa_demo_install/tasks/08-finish.yml @@ -0,0 +1,92 @@ +--- +- name: 0. Check Kubernetes Pods (ALL) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 1. IP Setting reset + command: "{{ role_path }}/files/ip_change {{ansible_default_ipv4.address}} {{before_ip}} {{ role_path }}/files" + +- name: 2. CloudMOA Version reset + command: "{{ role_path }}/files/rel_change {{ cmoa_version }} {{ before_version }} {{ role_path }}/files" + +- debug: + msg: + - ======================================================================================= + - "## Keycloak WEB" + - keycloak URL = http://{{ ansible_default_ipv4.address }}:31082 + - --------------------------------------------------------------------------------------- + - "## Keycloak Login Theme Setting" + - "## WEB > Realm Settings > Themes > Login Theme" + - " > CloudMOA_V2" + - --------------------------------------------------------------------------------------- + - "## CloudMOA WEB " + - CloudMOA Jaeger = http://{{ ansible_default_ipv4.address }}:31080 + - CloudMOA JSPD = http://{{ ansible_default_ipv4.address }}:31084 + - ======================================================================================= + +#- name: Node add Label (worker1) +# shell: kubectl get node "{{ item }}" --show-labels +# register: worker1 +# with_items: +# - "{{ ansible_hostname }}" +# #when: ansible_hostname in groups.worker1 +# +#- name: Node add Label (worker2) +# shell: kubectl get node "{{ item }}" --show-labels +# register: worker2 +# with_items: +# - "{{ ansible_hostname }}" +# #when: ansible_hostname in groups.worker2 +# 
+# +#- name: debug +# debug: +# msg: "{{item}}" +# with_items: +# - "{{ worker1.stdout }}" +# - "{{ worker2.stdout }}" + +#- name: Iterate over pod names and delete the filtered ones +# #debug: +# # msg: "{{ item }}" +# kubernetes.core.k8s_cp: +# namespace: imxc +# pod: "{{ item }}" +# remote_path: /tmp/postgres_insert_ddl.psql +# local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_ddl.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') + +#- name: Execute a command +# kubernetes.core.k8s_exec: +# namespace: imxc +# pod: "{{ item }}" +# command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_ddl.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') +# +#- name: Iterate over pod names and delete the filtered ones +# #debug: +# # msg: "{{ item }}" +# kubernetes.core.k8s_cp: +# namespace: imxc +# pod: "{{ item }}" +# remote_path: /tmp/postgres_insert_dml.psql +# local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_dml.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') +# +#- name: Execute a command +# kubernetes.core.k8s_exec: +# namespace: imxc +# pod: "{{ item }}" +# command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_dml.psql" +# with_items: "{{ pod_list.stdout_lines }}" +# when: item is match('postgres') +# register: test +# +#- name: test +# debug: +# msg: "{{ test.stdout }}" +##- set_fact: +## postgres_pod: "{{ postgres_pod2.stdout_lines is match('postgres') | default(postgres_pod2) }}" +# \ No newline at end of file diff --git a/roles/cmoa_demo_install/tasks/helm-install.yml b/roles/cmoa_demo_install/tasks/helm-install.yml new file mode 100644 index 0000000..d057455 --- /dev/null +++ b/roles/cmoa_demo_install/tasks/helm-install.yml @@ -0,0 +1,60 @@ +--- +- name: Create Helm 
temporary directory + file: + path: /tmp/helm + state: directory + mode: "0755" + +- name: Fetch Helm package + get_url: + url: 'https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz' + dest: /tmp/helm.tar.gz + checksum: '{{ helm_checksum }}' + +- name: Extract Helm package + unarchive: + remote_src: true + src: /tmp/helm.tar.gz + dest: /tmp/helm + +- name: Ensure "docker" group exists + group: + name: docker + state: present + become: true + +- name: Install helm to /usr/local/bin + copy: + remote_src: true + src: /tmp/helm/linux-amd64/helm + dest: /usr/local/bin/helm + owner: root + group: docker + mode: "0755" + become: true + +- name: Cleanup Helm temporary directory + file: + path: /tmp/helm + state: absent + +- name: Cleanup Helm temporary download + file: + path: /tmp/helm.tar.gz + state: absent + +- name: Ensure bash_completion.d directory exists + file: + path: /etc/bash_completion.d + state: directory + mode: "0755" + become: true + +- name: Setup Helm tab-completion + shell: | + set -o pipefail + /usr/local/bin/helm completion bash | tee /etc/bash_completion.d/helm + args: + executable: /bin/bash + changed_when: false + become: true diff --git a/roles/cmoa_demo_install/tasks/main.yml b/roles/cmoa_demo_install/tasks/main.yml new file mode 100644 index 0000000..7239fa3 --- /dev/null +++ b/roles/cmoa_demo_install/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- include: helm-install.yml + tags: helm-install + +- include: 00-default-settings-master.yml + tags: default_setting + when: kubernetes_role == 'master' + +- include: 00-default-settings-node.yml + tags: default_setting_node + when: kubernetes_role == 'node' + +- include: 01-storage-install.yml + tags: storage-install + when: kubernetes_role == 'master' + +- include: 02-base-install.yml + tags: base-install + when: kubernetes_role == 'master' + +- include: 03-ddl-dml.yml + tags: ddl-dml + when: kubernetes_role == 'master' + +- include: 04-keycloak-install.yml + tags: keycloak-install + when: 
kubernetes_role == 'master' + +- include: 05-imxc-install.yml + tags: imxc-install + when: kubernetes_role == 'master' + +- include: 06-imxc-ui-install.yml + tags: imxc-ui-install + when: kubernetes_role == 'master' + +- include: 07-keycloak-setting.yml + tags: keycloak-setting + when: kubernetes_role == 'master' + +- include: 08-finish.yml + tags: finish + when: kubernetes_role == 'master' \ No newline at end of file diff --git a/roles/cmoa_demo_install/templates/realm.json.j2 b/roles/cmoa_demo_install/templates/realm.json.j2 new file mode 100644 index 0000000..1323ce2 --- /dev/null +++ b/roles/cmoa_demo_install/templates/realm.json.j2 @@ -0,0 +1,7 @@ +{ + "id": "{{ keycloak_realm }}", + "realm": "{{ keycloak_realm }}", + "enabled": true, + "eventsEnabled": true, + "eventsExpiration": 7200 +} diff --git a/roles/cmoa_demo_install/vars/main.yml b/roles/cmoa_demo_install/vars/main.yml new file mode 100644 index 0000000..14c8e95 --- /dev/null +++ b/roles/cmoa_demo_install/vars/main.yml @@ -0,0 +1,7 @@ +--- +# name of the realm to create, this is a required variable +keycloak_realm: Exem + +# other settings +keycloak_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_http_port }}" +keycloak_management_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_management_http_port }}" diff --git a/roles/cmoa_install/defaults/main.yml b/roles/cmoa_install/defaults/main.yml new file mode 100644 index 0000000..7c45df5 --- /dev/null +++ b/roles/cmoa_install/defaults/main.yml @@ -0,0 +1,65 @@ +# helm file install +helm_checksum: sha256:950439759ece902157cf915b209b8d694e6f675eaab5099fb7894f30eeaee9a2 +helm_version: v3.10.3 + +# cmoa info +cmoa_namespace: imxc +cmoa_version: rel3.4.8 + +# default ip/version (not change) +before_ip: 111.111.111.111 +before_version: rel0.0.0 + +# files/00-default in role +docker_secret_file: secret_nexus.yaml + +# all, jaeger, jspd +imxc_ui: all + +# [docker_config_path] +docker_config_nexus: dockerconfig/docker_config_nexus.json + +# 
[jaeger] +jaeger_servicename: imxc-ui-service-jaeger +jaeger_service_port: 80 +jaeger_nodePort: 31080 # only imxc-ui-jaeger option (imxc-ui-jaeger template default port=31084) + +# [minio] +minio_service_name: minio +minio_service_port: 9000 +minio_nodePort: 32002 +minio_user: cloudmoa +minio_pass: admin1234 +bucket_name: cortex-bucket +days: 42 +rule_id: cloudmoa + +# [Elasticsearch] +elasticsearch_service_name: elasticsearch +elasticsearch_service_port: 9200 +elasticsearch_nodePort: 30200 + +# [Keycloak] +# Keycloak configuration settings +keycloak_http_port: 31082 +keycloak_https_port: 8443 +keycloak_management_http_port: 31990 +keycloak_realm: exem + +# Keycloak administration console user +keycloak_admin_user: admin +keycloak_admin_password: admin +keycloak_auth_realm: master +keycloak_auth_client: admin-cli +keycloak_context: /auth +keycloak_login_theme: CloudMOA_V2 + +# keycloak_clients +keycloak_clients: + - name: 'authorization_server' + client_id: authorization_server + realm: exem + redirect_uris: "http://{{ ansible_default_ipv4.address }}:31080/*,http://{{ ansible_default_ipv4.address }}:31084/*,http://localhost:8080/*,http://localhost:8081/*" + public_client: True + + diff --git a/roles/cmoa_install/files/00-default/sa_patch.sh b/roles/cmoa_install/files/00-default/sa_patch.sh new file mode 100755 index 0000000..618a35b --- /dev/null +++ b/roles/cmoa_install/files/00-default/sa_patch.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +export KUBECONFIG=$1 + +kubectl wait node --for=condition=ready --all --timeout=60s + +#kubectl -n imxc patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' +kubectl -n default patch sa default -p '{"imagePullSecrets": [{"name": "regcred"}]}' diff --git a/roles/cmoa_install/files/00-default/secret_dockerhub.yaml b/roles/cmoa_install/files/00-default/secret_dockerhub.yaml new file mode 100644 index 0000000..268027b --- /dev/null +++ b/roles/cmoa_install/files/00-default/secret_dockerhub.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 
+kind: Secret +metadata: + name: regcred +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOiB7CiAgICAgICJhdXRoIjogIlpYaGxiV1JsZGpJNk0yWXlObVV6T0RjdFlqY3paQzAwTkRVMUxUazNaRFV0T1dWaU9EWmtObVl4WXpOayIKICAgIH0KICB9Cn0KCg== +type: kubernetes.io/dockerconfigjson diff --git a/roles/cmoa_install/files/00-default/secret_nexus.yaml b/roles/cmoa_install/files/00-default/secret_nexus.yaml new file mode 100644 index 0000000..6a2543f --- /dev/null +++ b/roles/cmoa_install/files/00-default/secret_nexus.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + .dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICIxMC4xMC4zMS4yNDM6NTAwMCI6IHsKICAgICAgImF1dGgiOiAiWTI5eVpUcGpiM0psWVdSdGFXNHhNak0wIgogICAgfQogIH0KfQoK +kind: Secret +metadata: + name: regcred +type: kubernetes.io/dockerconfigjson + diff --git a/roles/cmoa_install/files/01-storage/00-storageclass.yaml b/roles/cmoa_install/files/01-storage/00-storageclass.yaml new file mode 100644 index 0000000..8f41292 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/00-storageclass.yaml @@ -0,0 +1,6 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: exem-local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer diff --git a/roles/cmoa_install/files/01-storage/01-persistentvolume.yaml b/roles/cmoa_install/files/01-storage/01-persistentvolume.yaml new file mode 100644 index 0000000..1bd4546 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/01-persistentvolume.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-0 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv1 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: 
minio-pv-1 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv2 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker1 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-2 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv3 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-pv-3 +spec: + capacity: + storage: 50Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: exem-local-storage + local: + path: /media/data/minio/pv4 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: cmoa + operator: In + values: + - worker2 diff --git a/roles/cmoa_install/files/01-storage/cmoa_minio b/roles/cmoa_install/files/01-storage/cmoa_minio new file mode 100755 index 0000000..522b87d --- /dev/null +++ b/roles/cmoa_install/files/01-storage/cmoa_minio @@ -0,0 +1,63 @@ +#! 
/usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, time, urllib3 +from minio import Minio +from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule, Transition +from minio.commonconfig import ENABLED, Filter + +def minio_conn(ipaddr, portnum, ac_key, sec_key): + conn='{}:{}'.format(ipaddr,portnum) + url='http://{}'.format(conn) + print(url) + minio_client = Minio( + conn, access_key=ac_key, secret_key=sec_key, secure=False, + http_client=urllib3.ProxyManager( + url, timeout=urllib3.Timeout.DEFAULT_TIMEOUT, + retries=urllib3.Retry( + total=5, backoff_factor=0.2, + status_forcelist=[ + 500, 502, 503, 504 + ], + ), + ), + ) + + return minio_client + +def minio_create_buckets(minio_client, bucket_name, days, rule_id="cloudmoa"): + config = LifecycleConfig( + [ + Rule( + ENABLED, + rule_filter=Filter(prefix=""), + rule_id=rule_id, + expiration=Expiration(days=days), + ), + ], + ) + minio_client.set_bucket_lifecycle(bucket_name, config) + +def minio_delete_bucket(client, bucket_name): + client.delete_bucket_lifecycle(bucket_name) + +def main(): + s3_url = os.sys.argv[1].split(':')[0] + s3_url_port = os.sys.argv[1].split(':')[1] + minio_user = os.sys.argv[2] + minio_pass = os.sys.argv[3] + bucket_name = os.sys.argv[4] + minio_days = os.sys.argv[5] + rule_id = os.sys.argv[6] + + print(s3_url, s3_url_port, minio_user, minio_pass) + + minio_client=minio_conn(s3_url, s3_url_port, minio_user, minio_pass) + minio_create_buckets(minio_client, bucket_name, minio_days, rule_id) + +if __name__ == "__main__": + try: + main() + except Exception as err: + print("[Usage] minio {url:port} {username} {password} {bucketName} {days} {ruleId}") + print(err) \ No newline at end of file diff --git a/roles/cmoa_install/files/01-storage/minio/.helmignore b/roles/cmoa_install/files/01-storage/minio/.helmignore new file mode 100644 index 0000000..a9fe727 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when 
building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/roles/cmoa_install/files/01-storage/minio/Chart.yaml b/roles/cmoa_install/files/01-storage/minio/Chart.yaml new file mode 100644 index 0000000..fc21076 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +description: Multi-Cloud Object Storage +name: minio +version: 4.0.2 +appVersion: RELEASE.2022-05-08T23-50-31Z +keywords: + - minio + - storage + - object-storage + - s3 + - cluster +home: https://min.io +icon: https://min.io/resources/img/logo/MINIO_wordmark.png +sources: +- https://github.com/minio/minio +maintainers: +- name: MinIO, Inc + email: dev@minio.io diff --git a/roles/cmoa_install/files/01-storage/minio/README.md b/roles/cmoa_install/files/01-storage/minio/README.md new file mode 100644 index 0000000..ad3eb7d --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/README.md @@ -0,0 +1,235 @@ +# MinIO Helm Chart + +[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE) + +MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. + +For more detailed documentation please visit [here](https://docs.minio.io/) + +## Introduction + +This chart bootstraps MinIO Cluster on [Kubernetes](http://kubernetes.io) using the [Helm](https://helm.sh) package manager. 
+ +## Prerequisites + +- Helm cli with Kubernetes cluster configured. +- PV provisioner support in the underlying infrastructure. +- Use Kubernetes version v1.19 and later for best experience. + +## Configure MinIO Helm repo + +```bash +helm repo add minio https://charts.min.io/ +``` + +### Installing the Chart + +Install this chart using: + +```bash +helm install --namespace minio --set rootUser=rootuser,rootPassword=rootpass123 --generate-name minio/minio +``` + +The command deploys MinIO on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +### Upgrading the Chart + +You can use Helm to update MinIO version in a live release. Assuming your release is named as `my-release`, get the values using the command: + +```bash +helm get values my-release > old_values.yaml +``` + +Then change the field `image.tag` in `old_values.yaml` file with MinIO image tag you want to use. Now update the chart using + +```bash +helm upgrade -f old_values.yaml my-release minio/minio +``` + +Default upgrade strategies are specified in the `values.yaml` file. Update these fields if you'd like to use a different strategy. + +### Configuration + +Refer the [Values file](./values.yaml) for all the possible config fields. + +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +helm install --name my-release --set persistence.size=1Ti minio/minio +``` + +The above command deploys MinIO server with a 1Ti backing persistent volume. + +Alternately, you can provide a YAML file that specifies parameter values while installing the chart. For example, + +```bash +helm install --name my-release -f values.yaml minio/minio +``` + +### Persistence + +This chart provisions a PersistentVolumeClaim and mounts corresponding persistent volume to default location `/export`. 
You'll need physical storage available in the Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable PersistentVolumeClaim by: + +```bash +helm install --set persistence.enabled=false minio/minio +``` + +> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."* + +### Existing PersistentVolumeClaim + +If a Persistent Volume Claim already exists, specify it during installation. + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +helm install --set persistence.existingClaim=PVC_NAME minio/minio +``` + +### NetworkPolicy + +To enable network policy for MinIO, +install [a networking plugin that implements the Kubernetes +NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for *all* pods in the namespace: + +``` +kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 9000. + +For more precise policy, set `networkPolicy.allowExternal=true`. This will +only allow pods with the generated client label to connect to MinIO. +This label will be displayed in the output of a successful install. + +### Existing secret + +Instead of having this chart create the secret for you, you can supply a preexisting secret, much +like an existing PersistentVolumeClaim. 
+ +First, create the secret: + +```bash +kubectl create secret generic my-minio-secret --from-literal=rootUser=foobarbaz --from-literal=rootPassword=foobarbazqux +``` + +Then install the chart, specifying that you want to use an existing secret: + +```bash +helm install --set existingSecret=my-minio-secret minio/minio +``` + +The following fields are expected in the secret: + +| .data.\ in Secret | Corresponding variable | Description | Required | +|:------------------------|:-----------------------|:---------------|:---------| +| `rootUser` | `rootUser` | Root user. | yes | +| `rootPassword` | `rootPassword` | Root password. | yes | + +All corresponding variables will be ignored in values file. + +### Configure TLS + +To enable TLS for MinIO containers, acquire TLS certificates from a CA or create self-signed certificates. While creating / acquiring certificates ensure the corresponding domain names are set as per the standard [DNS naming conventions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-identity) in a Kubernetes StatefulSet (for a distributed MinIO setup). Then create a secret using + +```bash +kubectl create secret generic tls-ssl-minio --from-file=path/to/private.key --from-file=path/to/public.crt +``` + +Then install the chart, specifying that you want to use the TLS secret: + +```bash +helm install --set tls.enabled=true,tls.certSecret=tls-ssl-minio minio/minio +``` + +### Installing certificates from third party CAs + +MinIO can connect to other servers, including MinIO nodes or other server types such as NATs and Redis. If these servers use certificates that were not registered with a known CA, add trust for these certificates to MinIO Server by bundling these certificates into a Kubernetes secret and providing it to Helm via the `trustedCertsSecret` value. 
If `.Values.tls.enabled` is `true` and you're installing certificates for third party CAs, remember to include MinIO's own certificate with key `public.crt`, if it also needs to be trusted. + +For instance, given that TLS is enabled and you need to add trust for MinIO's own CA and for the CA of a Keycloak server, a Kubernetes secret can be created from the certificate files using `kubectl`: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=public.crt --from-file=keycloak.crt +``` + +If TLS is not enabled, you would need only the third party CA: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=keycloak.crt +``` + +The name of the generated secret can then be passed to Helm using a values file or the `--set` parameter: + +``` +trustedCertsSecret: "minio-trusted-certs" + +or + +--set trustedCertsSecret=minio-trusted-certs +``` + +### Create buckets after install + +Install the chart, specifying the buckets you want to create after install: + +```bash +helm install --set buckets[0].name=bucket1,buckets[0].policy=none,buckets[0].purge=false minio/minio +``` + +Description of the configuration parameters used above - + +- `buckets[].name` - name of the bucket to create, must be a string with length > 0 +- `buckets[].policy` - can be one of none|download|upload|public +- `buckets[].purge` - purge if bucket exists already + +### Create policies after install +Install the chart, specifying the policies you want to create after install: + +```bash +helm install --set policies[0].name=mypolicy,policies[0].statements[0].resources[0]='arn:aws:s3:::bucket1',policies[0].statements[0].actions[0]='s3:ListBucket',policies[0].statements[0].actions[1]='s3:GetObject' minio/minio +``` + +Description of the configuration parameters used above - + +- `policies[].name` - name of the policy to create, must be a string with length > 0 +- `policies[].statements[]` - list of statements, includes actions and resources +- 
`policies[].statements[].resources[]` - list of resources that applies the statement +- `policies[].statements[].actions[]` - list of actions granted + +### Create user after install + +Install the chart, specifying the users you want to create after install: + +```bash +helm install --set users[0].accessKey=accessKey,users[0].secretKey=secretKey,users[0].policy=none,users[1].accessKey=accessKey2,users[1].secretRef=existingSecret,users[1].secretKey=password,users[1].policy=none minio/minio +``` + +Description of the configuration parameters used above - + +- `users[].accessKey` - accessKey of user +- `users[].secretKey` - secretKey of user +- `users[].existingSecret` - secret name that contains the secretKey of user +- `users[].existingSecretKey` - data key in existingSecret secret containing the secretKey +- `users[].policy` - name of the policy to assign to user + +## Uninstalling the Chart + +Assuming your release is named as `my-release`, delete it using the command: + +```bash +helm delete my-release +``` + +or + +```bash +helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. diff --git a/roles/cmoa_install/files/01-storage/minio/templates/NOTES.txt b/roles/cmoa_install/files/01-storage/minio/templates/NOTES.txt new file mode 100644 index 0000000..9337196 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/NOTES.txt @@ -0,0 +1,43 @@ +{{- if eq .Values.service.type "ClusterIP" "NodePort" }} +MinIO can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: +{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +To access MinIO from localhost, run the below commands: + + 1. export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + + 2. 
kubectl port-forward $POD_NAME 9000 --namespace {{ .Release.Namespace }} + +Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ + +You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . }}-local + +{{- end }} +{{- if eq .Values.service.type "LoadBalancer" }} +MinIO can be accessed via port {{ .Values.service.port }} on an external IP address. Get the service external IP address by: +kubectl get svc --namespace {{ .Release.Namespace }} -l app={{ template "minio.fullname" . }} + +Note that the public IP may take a couple of minutes to be available. + +You can now access MinIO server on http://:9000. Follow the below steps to connect to MinIO server with mc client: + + 1. Download the MinIO mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@:{{ .Values.service.port }} + + 3. mc ls {{ template "minio.fullname" . 
}} + +Alternately, you can use your browser or the MinIO SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "minio.fullname" . }}-client=true" +will be able to connect to this minio cluster. +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_bucket.txt b/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_bucket.txt new file mode 100644 index 0000000..35a48fc --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_bucket.txt @@ -0,0 +1,109 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? 
; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkBucketExists ($bucket) +# Check if the bucket exists, by using the exit code of `mc ls` +checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} ls myminio/$BUCKET > /dev/null 2>&1) + return $? +} + +# createBucket ($bucket, $policy, $purge) +# Ensure bucket exists, purging if asked to +createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." + set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist + if ! checkBucketExists $BUCKET ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + + + # set versioning for bucket + if [ ! -z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} policy set $POLICY myminio/$BUCKET +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.buckets }} +{{ $global := . 
}} +# Create the buckets +{{- range .Values.buckets }} +createBucket {{ tpl .name $global }} {{ .policy }} {{ .purge }} {{ .versioning }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_policy.txt b/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_policy.txt new file mode 100644 index 0000000..d565b16 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_policy.txt @@ -0,0 +1,75 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkPolicyExists ($policy) +# Check if the policy exists, by using the exit code of `mc admin policy info` +checkPolicyExists() { + POLICY=$1 + CMD=$(${MC} admin policy info myminio $POLICY > /dev/null 2>&1) + return $? 
+} + +# createPolicy($name, $filename) +createPolicy () { + NAME=$1 + FILENAME=$2 + + # Create the name if it does not exist + echo "Checking policy: $NAME (in /config/$FILENAME.json)" + if ! checkPolicyExists $NAME ; then + echo "Creating policy '$NAME'" + else + echo "Policy '$NAME' already exists." + fi + ${MC} admin policy add myminio $NAME /config/$FILENAME.json + +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.policies }} +# Create the policies +{{- range $idx, $policy := .Values.policies }} +createPolicy {{ $policy.name }} policy_{{ $idx }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_user.txt b/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_user.txt new file mode 100644 index 0000000..7771428 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/_helper_create_user.txt @@ -0,0 +1,88 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? 
; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkUserExists ($username) +# Check if the user exists, by using the exit code of `mc admin user info` +checkUserExists() { + USER=$1 + CMD=$(${MC} admin user info myminio $USER > /dev/null 2>&1) + return $? +} + +# createUser ($username, $password, $policy) +createUser() { + USER=$1 + PASS=$2 + POLICY=$3 + + # Create the user if it does not exist + if ! checkUserExists $USER ; then + echo "Creating user '$USER'" + ${MC} admin user add myminio $USER $PASS + else + echo "User '$USER' already exists." + fi + + + # set policy for user + if [ ! -z $POLICY -a $POLICY != " " ] ; then + echo "Adding policy '$POLICY' for '$USER'" + ${MC} admin policy set myminio $POLICY user=$USER + else + echo "User '$USER' has no policy attached." + fi +} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.users }} +{{ $global := . }} +# Create the users +{{- range .Values.users }} +{{- if .existingSecret }} +createUser {{ tpl .accessKey $global }} $(cat /config/secrets/{{ tpl .accessKey $global }}) {{ .policy }} +{{ else }} +createUser {{ tpl .accessKey $global }} {{ .secretKey }} {{ .policy }} +{{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/_helper_custom_command.txt b/roles/cmoa_install/files/01-storage/minio/templates/_helper_custom_command.txt new file mode 100644 index 0000000..b583a77 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/_helper_custom_command.txt @@ -0,0 +1,58 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. 
+ +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for MinIO service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# runCommand ($@) +# Run custom mc command +runCommand() { + ${MC} "$@" + return $? 
+} + +# Try connecting to MinIO instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{ if .Values.customCommands }} +# Run custom commands +{{- range .Values.customCommands }} +runCommand {{ .command }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/_helper_policy.tpl b/roles/cmoa_install/files/01-storage/minio/templates/_helper_policy.tpl new file mode 100644 index 0000000..83a2e15 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/_helper_policy.tpl @@ -0,0 +1,18 @@ +{{- $statements_length := len .statements -}} +{{- $statements_length := sub $statements_length 1 -}} +{ + "Version": "2012-10-17", + "Statement": [ +{{- range $i, $statement := .statements }} + { + "Effect": "Allow", + "Action": [ +"{{ $statement.actions | join "\",\n\"" }}" + ]{{ if $statement.resources }}, + "Resource": [ +"{{ $statement.resources | join "\",\n\"" }}" + ]{{ end }} + }{{ if lt $i $statements_length }},{{end }} +{{- end }} + ] +} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/_helpers.tpl b/roles/cmoa_install/files/01-storage/minio/templates/_helpers.tpl new file mode 100644 index 0000000..4e38194 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/_helpers.tpl @@ -0,0 +1,218 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "minio.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.Version -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare ">=1.7-0, <1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else if semverCompare "^1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "minio.deployment.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "minio.statefulset.apiVersion" -}} +{{- if semverCompare "<1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "minio.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for console ingress. +*/}} +{{- define "minio.consoleIngress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Determine secret name. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.existingSecret -}} +{{- .Values.existingSecret }} +{{- else -}} +{{- include "minio.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Determine name for scc role and rolebinding +*/}} +{{- define "minio.sccRoleName" -}} +{{- printf "%s-%s" "scc" (include "minio.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Properly format optional additional arguments to MinIO binary +*/}} +{{- define "minio.extraArgs" -}} +{{- range .Values.extraArgs -}} +{{ " " }}{{ . }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- end -}} + +{{/* +Formats volumeMount for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolumeMount" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + mountPath: {{ .Values.certsPath }} +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $casPath := printf "%s/CAs" .Values.certsPath | clean }} +- name: trusted-cert-secret-volume + mountPath: {{ $casPath }} +{{- end }} +{{- end -}} + +{{/* +Formats volume for MinIO TLS keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolume" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: public.crt + - key: {{ .Values.tls.privateKey }} + path: private.key +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $certSecret := eq .Values.trustedCertsSecret "" | ternary .Values.tls.certSecret .Values.trustedCertsSecret }} +{{- $publicCrt := eq .Values.trustedCertsSecret "" | ternary .Values.tls.publicCrt "" }} +- name: trusted-cert-secret-volume + secret: + secretName: {{ $certSecret }} + {{- if ne $publicCrt "" }} + items: + - key: {{ $publicCrt }} + path: public.crt + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. 
+*/}} +{{- define "minio.getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{- define "minio.root.username" -}} + {{- if .Values.rootUser }} + {{- .Values.rootUser | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 20 "Key" "rootUser") }} + {{- end }} +{{- end -}} + +{{- define "minio.root.password" -}} + {{- if .Values.rootPassword }} + {{- .Values.rootPassword | toString }} + {{- else }} + {{- include "minio.getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "minio.fullname" .) "Length" 40 "Key" "rootPassword") }} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/roles/cmoa_install/files/01-storage/minio/templates/configmap.yaml b/roles/cmoa_install/files/01-storage/minio/templates/configmap.yaml new file mode 100644 index 0000000..95a7c60 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/configmap.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + initialize: |- +{{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . | indent 4 }} + add-user: |- +{{ include (print $.Template.BasePath "/_helper_create_user.txt") . | indent 4 }} + add-policy: |- +{{ include (print $.Template.BasePath "/_helper_create_policy.txt") . | indent 4 }} +{{- range $idx, $policy := .Values.policies }} + # {{ $policy.name }} + policy_{{ $idx }}.json: |- +{{ include (print $.Template.BasePath "/_helper_policy.tpl") . 
| indent 4 }} +{{ end }} + custom-command: |- +{{ include (print $.Template.BasePath "/_helper_custom_command.txt") . | indent 4 }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/console-ingress.yaml b/roles/cmoa_install/files/01-storage/minio/templates/console-ingress.yaml new file mode 100644 index 0000000..2ce9a93 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/console-ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.consoleIngress.enabled -}} +{{- $fullName := printf "%s-console" (include "minio.fullname" .) -}} +{{- $servicePort := .Values.consoleService.port -}} +{{- $ingressPath := .Values.consoleIngress.path -}} +apiVersion: {{ template "minio.consoleIngress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.consoleIngress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.consoleIngress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.consoleIngress.ingressClassName }} + ingressClassName: {{ .Values.consoleIngress.ingressClassName }} +{{- end }} +{{- if .Values.consoleIngress.tls }} + tls: + {{- range .Values.consoleIngress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.consoleIngress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . 
| quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/console-service.yaml b/roles/cmoa_install/files/01-storage/minio/templates/console-service.yaml new file mode 100644 index 0000000..f4b1294 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/console-service.yaml @@ -0,0 +1,48 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-console + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.consoleService.annotations }} + annotations: +{{ toYaml .Values.consoleService.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.consoleService.type "ClusterIP" "") (empty .Values.consoleService.type)) }} + type: ClusterIP + {{- if not (empty .Values.consoleService.clusterIP) }} + clusterIP: {{ .Values.consoleService.clusterIP }} + {{end}} +{{- else if eq .Values.consoleService.type "LoadBalancer" }} + type: {{ .Values.consoleService.type }} + loadBalancerIP: {{ default "" .Values.consoleService.loadBalancerIP }} +{{- else }} + type: {{ .Values.consoleService.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.consoleService.port }} + protocol: TCP +{{- if (and (eq .Values.consoleService.type "NodePort") ( .Values.consoleService.nodePort)) }} + nodePort: {{ .Values.consoleService.nodePort }} +{{- else }} + targetPort: {{ .Values.consoleService.port }} +{{- end}} +{{- if .Values.consoleService.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.consoleService.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . 
}} + release: {{ .Release.Name }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/deployment.yaml b/roles/cmoa_install/files/01-storage/minio/templates/deployment.yaml new file mode 100644 index 0000000..a06bc35 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/deployment.yaml @@ -0,0 +1,174 @@ +{{- if eq .Values.mode "standalone" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: 1 + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio server {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }}" + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client_cert.pem" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client_cert_key.pem" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/gateway-deployment.yaml b/roles/cmoa_install/files/01-storage/minio/templates/gateway-deployment.yaml new file mode 100644 index 0000000..b14f86b --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/gateway-deployment.yaml @@ -0,0 +1,173 @@ +{{- if eq .Values.mode "gateway" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + replicas: {{ .Values.gateway.replicas }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "/bin/sh" + - "-ce" + {{- if eq .Values.gateway.type "nas" }} + - "/usr/bin/docker-entrypoint.sh minio gateway nas {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . }} " + {{- end }} + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + {{- if .Values.persistence.enabled }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/tmp/credentials/etcd_client.crt" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/tmp/credentials/etcd_client.key" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/ingress.yaml b/roles/cmoa_install/files/01-storage/minio/templates/ingress.yaml new file mode 100644 index 0000000..8d9a837 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/ingress.yaml @@ -0,0 +1,58 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "minio.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: {{ template "minio.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if . }} + host: {{ . 
| quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/networkpolicy.yaml b/roles/cmoa_install/files/01-storage/minio/templates/networkpolicy.yaml new file mode 100644 index 0000000..68a2599 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/networkpolicy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + ingress: + - ports: + - port: {{ .Values.service.port }} + - port: {{ .Values.consoleService.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "minio.name" . }}-client: "true" + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/poddisruptionbudget.yaml b/roles/cmoa_install/files/01-storage/minio/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..8037eb7 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: minio + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} +spec: + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + app: {{ template "minio.name" . 
}} +{{- end }} \ No newline at end of file diff --git a/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml b/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml new file mode 100644 index 0000000..434b31d --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-bucket-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.buckets }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-bucket-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-bucket-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeBucketJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeBucketJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.makeBucketJob.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeBucketJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makeBucketJob.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeBucketJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeBucketJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeBucketJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeBucketJob.resources | indent 10 }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml b/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml new file mode 100644 index 0000000..ae78769 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-policy-job.yaml @@ -0,0 +1,87 @@ +{{- if .Values.policies }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-policies-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-policies-job + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makePolicyJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.podAnnotations }} + annotations: +{{ toYaml .Values.makePolicyJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.makePolicyJob.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makePolicyJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makePolicyJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makePolicyJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makePolicyJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makePolicyJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makePolicyJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-policy"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . 
}} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makePolicyJob.resources | indent 10 }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-user-job.yaml b/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-user-job.yaml new file mode 100644 index 0000000..d3750e8 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/post-install-create-user-job.yaml @@ -0,0 +1,97 @@ +{{- $global := . -}} +{{- if .Values.users }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-user-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-make-user-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeUserJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeUserJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.makeUserJob.nodeSelector }} + nodeSelector: +{{ toYaml .Values.makeUserJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.makeUserJob.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- if .Values.makeUserJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeUserJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeUserJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeUserJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- range .Values.users }} + {{- if .existingSecret }} + - secret: + name: {{ tpl .existingSecret $global }} + items: + - key: {{ .existingSecretKey }} + path: secrets/{{ tpl .accessKey $global }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/add-user"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeUserJob.resources | indent 10 }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/post-install-custom-command.yaml b/roles/cmoa_install/files/01-storage/minio/templates/post-install-custom-command.yaml new file mode 100644 index 0000000..7e83faf --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/post-install-custom-command.yaml @@ -0,0 +1,87 @@ +{{- if .Values.customCommands }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . 
}}-custom-command-job + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }}-custom-command-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.customCommandJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.podAnnotations }} + annotations: +{{ toYaml .Values.customCommandJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.customCommandJob.nodeSelector }} + nodeSelector: +{{ toYaml .Values.customCommandJob.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.customCommandJob.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.customCommandJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.customCommandJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.customCommandJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.customCommandJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . 
}} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/custom-command"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.customCommandJob.resources | indent 10 }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/pvc.yaml b/roles/cmoa_install/files/01-storage/minio/templates/pvc.yaml new file mode 100644 index 0000000..369aade --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/pvc.yaml @@ -0,0 +1,35 @@ +{{- if eq .Values.mode "standalone" }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.persistence.VolumeName }} + volumeName: "{{ .Values.persistence.VolumeName }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/secrets.yaml b/roles/cmoa_install/files/01-storage/minio/templates/secrets.yaml new file mode 100644 index 0000000..da2ecab --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/secrets.yaml @@ -0,0 +1,22 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "minio.secretName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + rootUser: {{ include "minio.root.username" . | b64enc | quote }} + rootPassword: {{ include "minio.root.password" . 
| b64enc | quote }} + {{- if .Values.etcd.clientCert }} + etcd_client.crt: {{ .Values.etcd.clientCert | toString | b64enc | quote }} + {{- end }} + {{- if .Values.etcd.clientCertKey }} + etcd_client.key: {{ .Values.etcd.clientCertKey | toString | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/securitycontextconstraints.yaml b/roles/cmoa_install/files/01-storage/minio/templates/securitycontextconstraints.yaml new file mode 100644 index 0000000..4bac7e3 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/securitycontextconstraints.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.securityContext.enabled .Values.persistence.enabled (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: [] +readOnlyRootFilesystem: false +defaultAddCapabilities: [] +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +fsGroup: + type: MustRunAs + ranges: + - max: {{ .Values.securityContext.fsGroup }} + min: {{ .Values.securityContext.fsGroup }} +runAsUser: + type: MustRunAs + uid: {{ .Values.securityContext.runAsUser }} +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/service.yaml b/roles/cmoa_install/files/01-storage/minio/templates/service.yaml new file mode 100644 index 0000000..64aa990 --- /dev/null +++ 
b/roles/cmoa_install/files/01-storage/minio/templates/service.yaml @@ -0,0 +1,49 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + monitoring: "true" +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} + type: ClusterIP + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP +{{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} +{{- else }} + targetPort: 9000 +{{- end}} +{{- if .Values.service.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.service.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . 
}} + release: {{ .Release.Name }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/serviceaccount.yaml b/roles/cmoa_install/files/01-storage/minio/templates/serviceaccount.yaml new file mode 100644 index 0000000..6a4bd94 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/serviceaccount.yaml @@ -0,0 +1,7 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name | quote }} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/servicemonitor.yaml b/roles/cmoa_install/files/01-storage/minio/templates/servicemonitor.yaml new file mode 100644 index 0000000..809848f --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- if .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "minio.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{ else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- if .Values.tls.enabled }} + - port: https + scheme: https + {{ else }} + - port: http + scheme: http + {{- end }} + path: /minio/v2/metrics/cluster + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelConfigs }} +{{ toYaml .Values.metrics.serviceMonitor.relabelConfigs | indent 6 }} + {{- end }} + {{- if not .Values.metrics.serviceMonitor.public }} + bearerTokenSecret: + name: {{ template "minio.fullname" . }}-prometheus + key: token + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: + app: {{ include "minio.name" . 
}} + release: {{ .Release.Name }} + monitoring: "true" +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/templates/statefulset.yaml b/roles/cmoa_install/files/01-storage/minio/templates/statefulset.yaml new file mode 100644 index 0000000..b4160f0 --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/templates/statefulset.yaml @@ -0,0 +1,217 @@ +{{- if eq .Values.mode "distributed" }} +{{ $poolCount := .Values.pools | int }} +{{ $nodeCount := .Values.replicas | int }} +{{ $drivesPerNode := .Values.drivesPerNode | int }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $mountPath := .Values.mountPath }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +{{ $subPath := .Values.persistence.subPath }} +{{ $penabled := .Values.persistence.enabled }} +{{ $accessMode := .Values.persistence.accessMode }} +{{ $storageClass := .Values.persistence.storageClass }} +{{ $psize := .Values.persistence.size }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-svc + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +--- +apiVersion: {{ template "minio.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "minio.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + updateStrategy: + type: {{ .Values.StatefulSetUpdate.updateStrategy }} + podManagementPolicy: "Parallel" + serviceName: {{ template "minio.fullname" . }}-svc + replicas: {{ mul $poolCount $nodeCount }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: +{{- if not .Values.ignoreChartChecksums }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} +{{- end }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} + fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} + {{- end }} +{{- end }} +{{ if .Values.serviceAccount.create }} + serviceAccountName: {{ .Values.serviceAccount.name }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + + command: [ "/bin/sh", + "-ce", + "/usr/bin/docker-entrypoint.sh minio server {{- range $i := until $poolCount }}{{ $factor := mul $i $nodeCount }}{{ $endIndex := add $factor $nodeCount }}{{ $beginIndex := mul $i $nodeCount }} {{ $scheme }}://{{ template `minio.fullname` $ }}-{{ `{` }}{{ $beginIndex }}...{{ sub $endIndex 1 }}{{ `}`}}.{{ template `minio.fullname` $ }}-svc.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{if (gt $drivesPerNode 1)}}{{ $bucketRoot }}-{{ `{` }}0...{{ sub $drivesPerNode 1 }}{{ `}` }}{{else}}{{ $bucketRoot }}{{end}}{{- end}} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template `minio.extraArgs` . 
}}" ] + volumeMounts: + {{- if $penabled }} + {{- if (gt $drivesPerNode 1) }} + {{- range $i := until $drivesPerNode }} + - name: export-{{ $i }} + mountPath: {{ $mountPath }}-{{ $i }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- else }} + - name: export + mountPath: {{ $mountPath }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.extraSecret }} + - name: extra-secret + mountPath: "/tmp/minio-config-env" + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: {{ .Values.minioAPIPort }} + - name: {{ $scheme }}-console + containerPort: {{ .Values.minioConsolePort }} + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: rootPassword + {{- if .Values.extraSecret }} + - name: MINIO_CONFIG_ENV_FILE + value: "/tmp/minio-config-env/config.env" + {{- end}} + {{- if .Values.metrics.serviceMonitor.public }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + {{- end}} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- if .Values.extraSecret }} + - name: extra-secret + secret: + secretName: {{ .Values.extraSecret }} + {{- end }} + {{- include "minio.tlsKeysVolume" . 
| indent 8 }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode}} + - metadata: + name: export-{{ $diskId }} + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} + {{- else }} + - metadata: + name: export + {{- if $.Values.persistence.annotations }} + annotations: +{{ toYaml $.Values.persistence.annotations | trimSuffix "\n" | indent 10 }} + {{- end }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/01-storage/minio/values.yaml b/roles/cmoa_install/files/01-storage/minio/values.yaml new file mode 100644 index 0000000..a957f7f --- /dev/null +++ b/roles/cmoa_install/files/01-storage/minio/values.yaml @@ -0,0 +1,461 @@ +## Provide a name in place of minio for `app:` labels +## +nameOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## set kubernetes cluster domain where minio is running +## +clusterDomain: cluster.local + +## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the +## +image: + repository: 10.10.31.243:5000/cmoa3/minio + tag: RELEASE.2022-05-08T23-50-31Z + pullPolicy: IfNotPresent + +imagePullSecrets: + - name: "regcred" +# - name: "image-pull-secret" + +## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio +## client used to create a default bucket). 
+## +mcImage: + repository: 10.10.31.243:5000/cmoa3/mc + tag: RELEASE.2022-05-09T04-08-26Z + pullPolicy: IfNotPresent + +## minio mode, i.e. standalone or distributed or gateway. +mode: distributed ## other supported values are "standalone", "gateway" + +## Additional labels to include with deployment or statefulset +additionalLabels: [] + +## Additional annotations to include with deployment or statefulset +additionalAnnotations: [] + +## Typically the deployment/statefulset includes checksums of secrets/config, +## So that when these change on a subsequent helm install, the deployment/statefulset +## is restarted. This can result in unnecessary restarts under GitOps tooling such as +## flux, so set to "true" to disable this behaviour. +ignoreChartChecksums: false + +## Additional arguments to pass to minio binary +extraArgs: [] + +## Port number for MinIO S3 API Access +minioAPIPort: "9000" + +## Port number for MinIO Browser COnsole Access +minioConsolePort: "9001" + +## Update strategy for Deployments +DeploymentUpdate: + type: RollingUpdate + maxUnavailable: 0 + maxSurge: 100% + +## Update strategy for StatefulSets +StatefulSetUpdate: + updateStrategy: RollingUpdate + +## Pod priority settings +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Set default rootUser, rootPassword +## AccessKey and secretKey is generated when not set +## Distributed MinIO ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +rootUser: "admin" +rootPassword: "passW0rd" + +## Use existing Secret that store following variables: +## +## | Chart var | .data. in Secret | +## |:----------------------|:-------------------------| +## | rootUser | rootUser | +## | rootPassword | rootPassword | +## +## All mentioned variables will be ignored in values file. +## .data.rootUser and .data.rootPassword are mandatory, +## others depend on enabled status of corresponding sections. 
+existingSecret: "" + +## Directory on the MinIO pof +certsPath: "/etc/minio/certs/" +configPathmc: "/etc/minio/mc/" + +## Path where PV would be mounted on the MinIO Pod +mountPath: "/export" +## Override the root directory which the minio server should serve from. +## If left empty, it defaults to the value of {{ .Values.mountPath }} +## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }} +## +bucketRoot: "" + +# Number of drives attached to a node +drivesPerNode: 2 +# Number of MinIO containers running +#replicas: 16 +replicas: 2 +# Number of expanded MinIO clusters +pools: 1 + +# Deploy if 'mode == gateway' - 4 replicas. +gateway: + type: "nas" # currently only "nas" are supported. + replicas: 4 + +## TLS Settings for MinIO +tls: + enabled: false + ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret + certSecret: "" + publicCrt: public.crt + privateKey: private.key + +## Trusted Certificates Settings for MinIO. Ref: https://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls#install-certificates-from-third-party-cas +## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret +## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt. +## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. 
+trustedCertsSecret: "" + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + annotations: {} + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## minio data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + ## Storage class of PV to bind. By default it looks for standard storage class. + ## If the PV uses a different storage class, specify that here. + storageClass: "exem-local-storage" + VolumeName: "" + accessMode: ReadWriteOnce + size: 50Gi + + ## If subPath is set mount a sub folder of a volume instead of the root of the volume. + ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). + ## + subPath: "" + +## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## +#service: +# type: NodePort +# clusterIP: ~ + ## Make sure to match it to minioAPIPort +# port: "9000" +# nodePort: "32002" + +service: + type: ClusterIP + clusterIP: ~ + ## Make sure to match it to minioAPIPort + port: "9000" + +## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## + +ingress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +consoleService: + type: NodePort + clusterIP: ~ + ## Make sure to match it to minioConsolePort + port: "9001" + nodePort: "32001" + +consoleIngress: + enabled: false + # ingressClassName: "" + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - console.minio-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +tolerations: [] +affinity: {} + +## Add stateful containers to have security context, if 
enabled MinIO will run as this +## user and group NOTE: securityContext is only enabled if persistence.enabled=true +securityContext: + enabled: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: "OnRootMismatch" + +# Additational pod annotations +podAnnotations: {} + +# Additional pod labels +podLabels: {} + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + #memory: 16Gi + memory: 1Gi + cpu: 200m + +## List of policies to be created after minio install +## +## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics] +## you can define additional policies with custom supported actions and resources +policies: [] +## writeexamplepolicy policy grants creation or deletion of buckets with name +## starting with example. In addition, grants objects write permissions on buckets starting with +## example. +# - name: writeexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:AbortMultipartUpload" +# - "s3:GetObject" +# - "s3:DeleteObject" +# - "s3:PutObject" +# - "s3:ListMultipartUploadParts" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:CreateBucket" +# - "s3:DeleteBucket" +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## readonlyexamplepolicy policy grants access to buckets with name starting with example. +## In addition, grants objects read permissions on buckets starting with example. 
+# - name: readonlyexamplepolicy +# statements: +# - resources: +# - 'arn:aws:s3:::example*/*' +# actions: +# - "s3:GetObject" +# - resources: +# - 'arn:aws:s3:::example*' +# actions: +# - "s3:GetBucketLocation" +# - "s3:ListBucket" +# - "s3:ListBucketMultipartUploads" +## Additional Annotations for the Kubernetes Job makePolicyJob +makePolicyJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of users to be created after minio install +## +users: + ## Username, password and policy to be assigned to the user + ## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics] + ## Add new policies as explained here https://docs.min.io/docs/minio-multi-user-quickstart-guide.html + ## NOTE: this will fail if LDAP is enabled in your MinIO deployment + ## make sure to disable this if you are using LDAP. 
+ - accessKey: cloudmoa + secretKey: admin1234 + policy: consoleAdmin + # Or you can refer to specific secret + #- accessKey: externalSecret + # existingSecret: my-secret + # existingSecretKey: password + # policy: readonly + + +## Additional Annotations for the Kubernetes Job makeUserJob +makeUserJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of buckets to be created after minio install +## +buckets: + - name: cortex-bucket + policy: none + purge: false + versioning: false + + # # Name of the bucket + # - name: bucket1 + # # Policy to be set on the + # # bucket [none|download|upload|public] + # policy: none + # # Purge if bucket exists already + # purge: false + # # set versioning for + # # bucket [true|false] + # versioning: false + # - name: bucket2 + # policy: none + # purge: false + # versioning: true + +## Additional Annotations for the Kubernetes Job makeBucketJob +makeBucketJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## List of command to run after minio install +## NOTE: the mc command TARGET is always "myminio" +customCommands: + # - command: "admin policy set myminio consoleAdmin group='cn=ops,cn=groups,dc=example,dc=com'" + +## Additional Annotations for the Kubernetes Job customCommandJob +customCommandJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + nodeSelector: {} + tolerations: [] + affinity: {} + +## Use this field to add environment variables relevant to MinIO server. 
These fields will be passed on to MinIO container(s) +## when Chart is deployed +environment: + ## Please refer for comprehensive list https://docs.min.io/minio/baremetal/reference/minio-server/minio-server.html + ## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io" + ## MINIO_BROWSER: "off" + +## The name of a secret in the same kubernetes namespace which contain secret values +## This can be useful for LDAP password, etc +## The key in the secret must be 'config.env' +## +# extraSecret: minio-extraenv + +networkPolicy: + enabled: false + allowExternal: true + +## PodDisruptionBudget settings +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +## Specify the service account to use for the MinIO pods. If 'create' is set to 'false' +## and 'name' is left unspecified, the account 'default' will be used. +serviceAccount: + create: true + ## The name of the service account to use. If 'create' is 'true', a service account with that name + ## will be created. + name: "minio-sa" + +metrics: + serviceMonitor: + enabled: false + public: true + additionalLabels: {} + relabelConfigs: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + +## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md +## Define endpoints to enable this section. 
+etcd: + endpoints: [] + pathPrefix: "" + corednsPathPrefix: "" + clientCert: "" + clientCertKey: "" diff --git a/roles/cmoa_install/files/02-base/00-kafka-broker-config.yaml b/roles/cmoa_install/files/02-base/00-kafka-broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/roles/cmoa_install/files/02-base/00-kafka-broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? -ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. 
cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? -eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# 
Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + 
log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the 
two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/roles/cmoa_install/files/02-base/01-coredns.yaml b/roles/cmoa_install/files/02-base/01-coredns.yaml new file mode 100644 index 0000000..c1cb74b --- /dev/null +++ b/roles/cmoa_install/files/02-base/01-coredns.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: kube-dns + kubernetes.io/name: coredns + name: coredns + namespace: kube-system +spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + protocol: TCP + targetPort: 53 + - name: metrics + 
port: 9153 + protocol: TCP + targetPort: 9153 + selector: + k8s-app: kube-dns + sessionAffinity: None + type: ClusterIP + diff --git a/roles/cmoa_install/files/02-base/base/.helmignore b/roles/cmoa_install/files/02-base/base/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_install/files/02-base/base/Chart.yaml b/roles/cmoa_install/files/02-base/base/Chart.yaml new file mode 100644 index 0000000..74d1d30 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: base +version: 0.1.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/analysis/.helmignore b/roles/cmoa_install/files/02-base/base/charts/analysis/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/analysis/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_install/files/02-base/base/charts/analysis/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/analysis/Chart.yaml new file mode 100644 index 0000000..74b9505 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/analysis/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: analysis +version: 0.1.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml b/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml new file mode 100644 index 0000000..21a9298 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-master.yaml @@ -0,0 +1,87 @@ +#docker run -d --hostname my-rabbit --name some-rabbit -p 8080:15672 -p 5672:5672 rabbitmq:3-management + +--- +kind: Service +apiVersion: v1 +metadata: + name: metric-analyzer-master + namespace: imxc +spec: +# clusterIP: None # We need a headless service to allow the pods to discover each + ports: # other during autodiscover phase for cluster creation. + - name: http # A ClusterIP will prevent resolving dns requests for other pods + protocol: TCP # under the same service. 
+ port: 15672 + targetPort: 15672 +# nodePort: 30001 + - name: amqp + protocol: TCP + port: 5672 + targetPort: 5672 +# nodePort: 30002 + selector: + app: metric-analyzer-master +# type: NodePort +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metric-analyzer-master + name: metric-analyzer-master + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: metric-analyzer-master + template: + metadata: + labels: + app: metric-analyzer-master + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer:{{ .Values.global.METRIC_ANALYZER_MASTER_VERSION }} + imagePullPolicy: IfNotPresent + name: master +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: POSTGRES_SERVER + value: postgres + - name: POSTGRES_USER + value: admin + - name: POSTGRES_PW + value: eorbahrhkswp + - name: POSTGRES_DB + value: postgresdb + - name: PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: POSTGRES_PORT + value: "5432" + - name: ES_SERVER + value: elasticsearch + - name: ES_PORT + value: "9200" + - name: ES_ID + value: "elastic" + - name: ES_PWD + value: "elastic" + - name: LOG_LEVEL + value: INFO + - name: AI_TYPE + value: BASELINE + - name: BASELINE_SIZE + value: "3" + - name: CHECK_DAY + value: "2" + resources: + requests: + memory: "100Mi" diff --git a/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml b/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml new file mode 100644 index 0000000..7e6eaea --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/analysis/templates/imxc-metric-analyzer-worker.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metric-analyzer-worker + name: 
metric-analyzer-worker + namespace: imxc +spec: + replicas: 10 + selector: + matchLabels: + app: metric-analyzer-worker + template: + metadata: + labels: + app: metric-analyzer-worker + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric_analyzer_worker:{{ .Values.global.METRIC_ANALYZER_WORKER_VERSION }} + imagePullPolicy: IfNotPresent + name: worker +# volumeMounts: +# - mountPath: /etc/localtime +# name: timezone-config + env: + - name: BROKER + value: base-rabbitmq + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" +# volumes: +# - hostPath: +# path: /usr/share/zoneinfo/Asia/Seoul +# name: timezone-config + resources: + requests: + memory: "100Mi" diff --git a/roles/cmoa_install/files/02-base/base/charts/analysis/values.yaml b/roles/cmoa_install/files/02-base/base/charts/analysis/values.yaml new file mode 100644 index 0000000..d764210 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/analysis/values.yaml @@ -0,0 +1,68 @@ +# Default values for analysis. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/.helmignore b/roles/cmoa_install/files/02-base/base/charts/cortex/.helmignore new file mode 100644 index 0000000..db3418b --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/.helmignore @@ -0,0 +1,29 @@ +# Git +.git/ +.gitignore +.github/ + +# IDE +.project +.idea/ +*.tmproj + +# Common backup files +*.swp +*.bak +*.tmp +*~ + +# Cortex ignore +docs/ +tools/ +ct.yaml +ci/ +README.md.gotmpl +.prettierignore +CHANGELOG.md +MAINTAINERS.md +LICENSE +Makefile +renovate.json + diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.lock b/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.lock new file mode 100644 index 0000000..f909218 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.lock @@ -0,0 +1,24 @@ +dependencies: +- name: memcached + repository: 
https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +digest: sha256:a6b7c1239f9cabc85dd647798a6f92ae8a9486756ab1e87fc11af2180ab03ee4 +generated: "2021-12-25T19:21:57.666697218Z" diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.yaml new file mode 100644 index 0000000..9122fe6 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/Chart.yaml @@ -0,0 +1,56 @@ +apiVersion: v2 +appVersion: v1.11.0 +dependencies: +- alias: memcached + condition: memcached.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-read + condition: memcached-index-read.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-index-write + condition: memcached-index-write.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-frontend + condition: memcached-frontend.enabled + name: memcached + repository: https://charts.bitnami.com/bitnami + version: 5.15.12 +- alias: memcached-blocks-index + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +- alias: memcached-blocks + name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +- alias: memcached-blocks-metadata + 
name: memcached + repository: https://charts.bitnami.com/bitnami + tags: + - blocks-storage-memcached + version: 5.15.12 +description: Horizontally scalable, highly available, multi-tenant, long term Prometheus. +home: https://cortexmetrics.io/ +icon: https://avatars2.githubusercontent.com/u/43045022?s=200&v=4 +kubeVersion: ^1.19.0-0 +maintainers: +- email: thayward@infoblox.com + name: Tom Hayward + url: https://github.com/kd7lxl +- email: Niclas.Schad@plusserver.com + name: Niclas Schad + url: https://github.com/ShuzZzle +name: cortex +sources: +- https://github.com/cortexproject/cortex-helm-chart +version: 1.2.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/README.md b/roles/cmoa_install/files/02-base/base/charts/cortex/README.md new file mode 100644 index 0000000..9a793d3 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/README.md @@ -0,0 +1,754 @@ + + +# cortex + +![Version: 1.2.0](https://img.shields.io/badge/Version-1.2.0-informational?style=flat-square) ![AppVersion: v1.11.0](https://img.shields.io/badge/AppVersion-v1.11.0-informational?style=flat-square) + +Horizontally scalable, highly available, multi-tenant, long term Prometheus. + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Tom Hayward | thayward@infoblox.com | https://github.com/kd7lxl | +| Niclas Schad | Niclas.Schad@plusserver.com | https://github.com/ShuzZzle | + +## Documentation + +Checkout our documentation for the cortex-helm-chart [here](https://cortexproject.github.io/cortex-helm-chart/) + +## Dependencies + +### Key-Value store + +Cortex requires a Key-Value (KV) store to store the ring. It can use traditional KV stores like [Consul](https://www.consul.io/) or [etcd](https://etcd.io/), but it can also build its own KV store on top of memberlist library using a gossip algorithm. + +The recommended approach is to use the built-in memberlist as a KV store, where supported. 
+ +External KV stores can be installed alongside Cortex using their respective helm charts https://github.com/bitnami/charts/tree/master/bitnami/etcd and https://github.com/helm/charts/tree/master/stable/consul. + +### Storage + +Cortex requires a storage backend to store metrics and indexes. +See [cortex documentation](https://cortexmetrics.io/docs/) for details on storage types and documentation + +## Installation + +[Helm](https://helm.sh) must be installed to use the charts. +Please refer to Helm's [documentation](https://helm.sh/docs/) to get started. + +Once Helm is set up properly, add the repo as follows: + +```bash + helm repo add cortex-helm https://cortexproject.github.io/cortex-helm-chart +``` + +Cortex can now be installed with the following command: + +```bash + helm install cortex --namespace cortex cortex-helm/cortex +``` + +If you have custom options or values you want to override: + +```bash + helm install cortex --namespace cortex -f my-cortex-values.yaml cortex-helm/cortex +``` + +Specific versions of the chart can be installed using the `--version` option, with the default being the latest release. +What versions are available for installation can be listed with the following command: + +```bash + helm search repo cortex-helm +``` + +As part of this chart many different pods and services are installed which all +have varying resource requirements. Please make sure that you have sufficient +resources (CPU/memory) available in your cluster before installing Cortex Helm +chart. + +## Upgrades + +To upgrade Cortex use the following command: + +```bash + helm upgrade cortex -f my-cortex-values.yaml cortex-helm/cortex +``` +Note that it might be necessary to use `--reset-values` since some default values in the values.yaml might have changed or were removed. 
+ +Source code can be found [here](https://cortexmetrics.io/) + +## Requirements + +Kubernetes: `^1.19.0-0` + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | memcached(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-read(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-index-write(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-frontend(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-index(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks(memcached) | 5.15.12 | +| https://charts.bitnami.com/bitnami | memcached-blocks-metadata(memcached) | 5.15.12 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| alertmanager.​affinity | object | `{}` | | +| alertmanager.​annotations | object | `{}` | | +| alertmanager.​containerSecurityContext.​enabled | bool | `true` | | +| alertmanager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| alertmanager.​enabled | bool | `true` | | +| alertmanager.​env | list | `[]` | Extra env variables to pass to the cortex container | +| alertmanager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log level (debug, info, warn, error) | +| alertmanager.​extraContainers | list | `[]` | Additional containers to be added to the cortex pod. | +| alertmanager.​extraPorts | list | `[]` | Additional ports to the cortex services. Useful to expose extra container ports. | +| alertmanager.​extraVolumeMounts | list | `[]` | Extra volume mounts that will be added to the cortex container | +| alertmanager.​extraVolumes | list | `[]` | Additional volumes to the cortex pod. | +| alertmanager.​initContainers | list | `[]` | Init containers to be added to the cortex pod. 
| +| alertmanager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​nodeSelector | object | `{}` | | +| alertmanager.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Alertmanager data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| alertmanager.​persistentVolume.​annotations | object | `{}` | Alertmanager data Persistent Volume Claim annotations | +| alertmanager.​persistentVolume.​enabled | bool | `true` | If true and alertmanager.statefulSet.enabled is true, Alertmanager will create/use a Persistent Volume Claim If false, use emptyDir | +| alertmanager.​persistentVolume.​size | string | `"2Gi"` | Alertmanager data Persistent Volume size | +| alertmanager.​persistentVolume.​storageClass | string | `nil` | Alertmanager data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| alertmanager.​persistentVolume.​subPath | string | `""` | Subdirectory of Alertmanager data Persistent Volume to mount Useful if the volume's root directory is not empty | +| alertmanager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| alertmanager.​podDisruptionBudget | object | `{"maxUnavailable":1}` | If not set then a PodDisruptionBudget will not be created | +| alertmanager.​podLabels | object | `{}` | Pod Labels | +| alertmanager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​replicas | int | `1` | | +| alertmanager.​resources | object | `{}` | | +| alertmanager.​securityContext | object | `{}` | | +| alertmanager.​service.​annotations | object | `{}` | | +| alertmanager.​service.​labels | object | `{}` | | +| alertmanager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| alertmanager.​serviceMonitor.​additionalLabels | object | `{}` | | +| alertmanager.​serviceMonitor.​enabled | bool | `false` | | +| alertmanager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| alertmanager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| alertmanager.​serviceMonitor.​relabelings | list | `[]` | | +| alertmanager.​sidecar | object | `{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/data","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_alertmanager","labelValue":null,"resources":{},"searchNamespace":null,"skipTlsVerify":false,"watchMethod":null}` | Sidecars 
that collect the configmaps with the specified label and store the included files into the respective folders | +| alertmanager.​sidecar.​skipTlsVerify | bool | `false` | skipTlsVerify Set to true to skip tls verification for kube api calls | +| alertmanager.​startupProbe.​failureThreshold | int | `10` | | +| alertmanager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| alertmanager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| alertmanager.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. This is useful for using a persistent volume for storing silences between restarts. | +| alertmanager.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| alertmanager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| alertmanager.​strategy.​type | string | `"RollingUpdate"` | | +| alertmanager.​terminationGracePeriodSeconds | int | `60` | | +| alertmanager.​tolerations | list | `[]` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| clusterDomain | string | `"cluster.local"` | Kubernetes cluster DNS domain | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"compactor"` | | +| compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| 
compactor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| compactor.​annotations | object | `{}` | | +| compactor.​containerSecurityContext.​enabled | bool | `true` | | +| compactor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| compactor.​enabled | bool | `true` | | +| compactor.​env | list | `[]` | | +| compactor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| compactor.​extraContainers | list | `[]` | | +| compactor.​extraPorts | list | `[]` | | +| compactor.​extraVolumeMounts | list | `[]` | | +| compactor.​extraVolumes | list | `[]` | | +| compactor.​initContainers | list | `[]` | | +| compactor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​nodeSelector | object | `{}` | | +| compactor.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | compactor data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| compactor.​persistentVolume.​annotations | object | `{}` | compactor data Persistent Volume Claim annotations | +| compactor.​persistentVolume.​enabled | bool | `true` | If true compactor will create/use a Persistent Volume Claim If false, use emptyDir | +| compactor.​persistentVolume.​size | string | `"2Gi"` | | +| compactor.​persistentVolume.​storageClass | string | `nil` | compactor data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| compactor.​persistentVolume.​subPath | string | `""` | Subdirectory of compactor data Persistent Volume to mount Useful if the volume's root directory is not empty | +| compactor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| compactor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| compactor.​podLabels | object | `{}` | Pod Labels | +| compactor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​replicas | int | `1` | | +| compactor.​resources | object | `{}` | | +| compactor.​securityContext | object | `{}` | | +| compactor.​service.​annotations | object | `{}` | | +| compactor.​service.​labels | object | `{}` | | +| compactor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| compactor.​serviceMonitor.​additionalLabels | object | `{}` | | +| compactor.​serviceMonitor.​enabled | bool | `false` | | +| compactor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| compactor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| compactor.​serviceMonitor.​relabelings | list | `[]` | | +| compactor.​startupProbe.​failureThreshold | int | `60` | | +| compactor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| compactor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| compactor.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| compactor.​startupProbe.​initialDelaySeconds | int | `120` | | +| compactor.​startupProbe.​periodSeconds | int | `30` | | +| compactor.​strategy.​type | string | `"RollingUpdate"` | | +| compactor.​terminationGracePeriodSeconds | int | `240` | | +| compactor.​tolerations | list | `[]` | | +| 
config.​alertmanager.​enable_api | bool | `false` | Enable the experimental alertmanager config api. | +| config.​alertmanager.​external_url | string | `"/api/prom/alertmanager"` | | +| config.​alertmanager.​storage | object | `{}` | Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config | +| config.​api.​prometheus_http_prefix | string | `"/prometheus"` | | +| config.​api.​response_compression_enabled | bool | `true` | Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression. | +| config.​auth_enabled | bool | `false` | | +| config.​blocks_storage.​bucket_store.​bucket_index.​enabled | bool | `true` | | +| config.​blocks_storage.​bucket_store.​sync_dir | string | `"/data/tsdb-sync"` | | +| config.​blocks_storage.​tsdb.​dir | string | `"/data/tsdb"` | | +| config.​distributor.​pool.​health_check_ingesters | bool | `true` | | +| config.​distributor.​shard_by_all_labels | bool | `true` | Distribute samples based on all labels, as opposed to solely by user and metric name. | +| config.​frontend.​log_queries_longer_than | string | `"10s"` | | +| config.​ingester.​lifecycler.​final_sleep | string | `"30s"` | Duration to sleep for before exiting, to ensure metrics are scraped. | +| config.​ingester.​lifecycler.​join_after | string | `"10s"` | We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. It can take a while to have the full picture when using gossip | +| config.​ingester.​lifecycler.​num_tokens | int | `512` | | +| config.​ingester.​lifecycler.​observe_period | string | `"10s"` | To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, after putting their own tokens into it. 
This is only useful when using gossip, since multiple ingesters joining at the same time can have conflicting tokens if they don't see each other yet. | +| config.​ingester.​lifecycler.​ring.​kvstore.​store | string | `"memberlist"` | | +| config.​ingester.​lifecycler.​ring.​replication_factor | int | `3` | Ingester replication factor per default is 3 | +| config.​ingester_client.​grpc_client_config.​max_recv_msg_size | int | `10485760` | | +| config.​ingester_client.​grpc_client_config.​max_send_msg_size | int | `10485760` | | +| config.​limits.​enforce_metric_name | bool | `true` | Enforce that every sample has a metric name | +| config.​limits.​max_query_lookback | string | `"0s"` | | +| config.​limits.​reject_old_samples | bool | `true` | | +| config.​limits.​reject_old_samples_max_age | string | `"168h"` | | +| config.​memberlist.​bind_port | int | `7946` | | +| config.​memberlist.​join_members | list | `["{{ include \"cortex.fullname\" $ }}-memberlist"]` | the service name of the memberlist if using memberlist discovery | +| config.​querier.​active_query_tracker_dir | string | `"/data/active-query-tracker"` | | +| config.​querier.​query_ingesters_within | string | `"13h"` | Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester. Ingesters by default have no data older than 12 hours, so we can safely set this 13 hours | +| config.​querier.​query_store_after | string | `"12h"` | The time after which a metric should be queried from storage and not just ingesters. | +| config.​querier.​store_gateway_addresses | string | automatic | Comma separated list of store-gateway addresses in DNS Service Discovery format. This option is set automatically when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring). 
| +| config.​query_range.​align_queries_with_step | bool | `true` | | +| config.​query_range.​cache_results | bool | `true` | | +| config.​query_range.​results_cache.​cache.​memcached.​expiration | string | `"1h"` | | +| config.​query_range.​results_cache.​cache.​memcached_client.​timeout | string | `"1s"` | | +| config.​query_range.​split_queries_by_interval | string | `"24h"` | | +| config.​ruler.​enable_alertmanager_discovery | bool | `false` | | +| config.​ruler.​enable_api | bool | `true` | Enable the experimental ruler config api. | +| config.​ruler.​storage | object | `{}` | Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config | +| config.​runtime_config.​file | string | `"/etc/cortex-runtime-config/runtime_config.yaml"` | | +| config.​server.​grpc_listen_port | int | `9095` | | +| config.​server.​grpc_server_max_concurrent_streams | int | `10000` | | +| config.​server.​grpc_server_max_recv_msg_size | int | `10485760` | | +| config.​server.​grpc_server_max_send_msg_size | int | `10485760` | | +| config.​server.​http_listen_port | int | `8080` | | +| config.​storage | object | `{"engine":"blocks","index_queries_cache_config":{"memcached":{"expiration":"1h"},"memcached_client":{"timeout":"1s"}}}` | See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config | +| config.​storage.​index_queries_cache_config.​memcached.​expiration | string | `"1h"` | How long keys stay in the memcache | +| config.​storage.​index_queries_cache_config.​memcached_client.​timeout | string | `"1s"` | Maximum time to wait before giving up on memcached requests. 
| +| config.​store_gateway | object | `{"sharding_enabled":false}` | https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config | +| configs.​affinity | object | `{}` | | +| configs.​annotations | object | `{}` | | +| configs.​containerSecurityContext.​enabled | bool | `true` | | +| configs.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| configs.​enabled | bool | `false` | | +| configs.​env | list | `[]` | | +| configs.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| configs.​extraContainers | list | `[]` | | +| configs.​extraPorts | list | `[]` | | +| configs.​extraVolumeMounts | list | `[]` | | +| configs.​extraVolumes | list | `[]` | | +| configs.​initContainers | list | `[]` | | +| configs.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​nodeSelector | object | `{}` | | +| configs.​persistentVolume.​subPath | string | `nil` | | +| configs.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| configs.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| configs.​podLabels | object | `{}` | Pod Labels | +| configs.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​replicas | int | `1` | | +| configs.​resources | object | `{}` | | +| configs.​securityContext | object | `{}` | | +| configs.​service.​annotations | object | `{}` | | +| configs.​service.​labels | object | `{}` | | +| configs.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| configs.​serviceMonitor.​additionalLabels | object | `{}` | | +| configs.​serviceMonitor.​enabled | bool | `false` | | +| configs.​serviceMonitor.​extraEndpointSpec | object | `{}` | 
Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| configs.​serviceMonitor.​metricRelabelings | list | `[]` | | +| configs.​serviceMonitor.​relabelings | list | `[]` | | +| configs.​startupProbe.​failureThreshold | int | `10` | | +| configs.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| configs.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| configs.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| configs.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| configs.​strategy.​type | string | `"RollingUpdate"` | | +| configs.​terminationGracePeriodSeconds | int | `180` | | +| configs.​tolerations | list | `[]` | | +| configsdb_postgresql.​auth.​existing_secret.​key | string | `nil` | | +| configsdb_postgresql.​auth.​existing_secret.​name | string | `nil` | | +| configsdb_postgresql.​auth.​password | string | `nil` | | +| configsdb_postgresql.​enabled | bool | `false` | | +| configsdb_postgresql.​uri | string | `nil` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"distributor"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| distributor.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| distributor.​annotations | object | `{}` | | +| distributor.​autoscaling.​behavior 
| object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| distributor.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the distributor pods. | +| distributor.​autoscaling.​maxReplicas | int | `30` | | +| distributor.​autoscaling.​minReplicas | int | `2` | | +| distributor.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| distributor.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| distributor.​containerSecurityContext.​enabled | bool | `true` | | +| distributor.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| distributor.​env | list | `[]` | | +| distributor.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| distributor.​extraContainers | list | `[]` | | +| distributor.​extraPorts | list | `[]` | | +| distributor.​extraVolumeMounts | list | `[]` | | +| distributor.​extraVolumes | list | `[]` | | +| distributor.​initContainers | list | `[]` | | +| distributor.​lifecycle | object | `{}` | | +| distributor.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​nodeSelector | object | `{}` | | +| distributor.​persistentVolume.​subPath | string | `nil` | | +| distributor.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| distributor.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| distributor.​podLabels | object | `{}` | Pod Labels | +| distributor.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​replicas | int | `2` | | +| distributor.​resources | object | `{}` | | +| distributor.​securityContext | object | `{}` | | +| distributor.​service.​annotations | object | `{}` | | +| 
distributor.​service.​labels | object | `{}` | | +| distributor.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| distributor.​serviceMonitor.​additionalLabels | object | `{}` | | +| distributor.​serviceMonitor.​enabled | bool | `false` | | +| distributor.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| distributor.​serviceMonitor.​metricRelabelings | list | `[]` | | +| distributor.​serviceMonitor.​relabelings | list | `[]` | | +| distributor.​startupProbe.​failureThreshold | int | `10` | | +| distributor.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| distributor.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| distributor.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| distributor.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| distributor.​strategy.​type | string | `"RollingUpdate"` | | +| distributor.​terminationGracePeriodSeconds | int | `60` | | +| distributor.​tolerations | list | `[]` | | +| externalConfigSecretName | string | `"secret-with-config.yaml"` | | +| externalConfigVersion | string | `"0"` | | +| image.​pullPolicy | string | `"IfNotPresent"` | | +| image.​pullSecrets | list | `[]` | Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace. ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| image.​repository | string | `"quay.io/cortexproject/cortex"` | | +| image.​tag | string | `""` | Allows you to override the cortex version in this chart. Use at your own risk. 
| +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"ingester"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| ingester.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| ingester.​annotations | object | `{}` | | +| ingester.​autoscaling.​behavior.​scaleDown.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details | +| ingester.​autoscaling.​behavior.​scaleDown.​stabilizationWindowSeconds | int | `3600` | uses metrics from the past 1h to make scaleDown decisions | +| ingester.​autoscaling.​behavior.​scaleUp.​policies | list | `[{"periodSeconds":1800,"type":"Pods","value":1}]` | This default scaleup policy allows adding 1 pod every 30 minutes. 
Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| ingester.​autoscaling.​enabled | bool | `false` | | +| ingester.​autoscaling.​maxReplicas | int | `30` | | +| ingester.​autoscaling.​minReplicas | int | `3` | | +| ingester.​autoscaling.​targetMemoryUtilizationPercentage | int | `80` | | +| ingester.​containerSecurityContext.​enabled | bool | `true` | | +| ingester.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ingester.​env | list | `[]` | | +| ingester.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| ingester.​extraContainers | list | `[]` | | +| ingester.​extraPorts | list | `[]` | | +| ingester.​extraVolumeMounts | list | `[]` | | +| ingester.​extraVolumes | list | `[]` | | +| ingester.​initContainers | list | `[]` | | +| ingester.​lifecycle.​preStop | object | `{"httpGet":{"path":"/ingester/shutdown","port":"http-metrics"}}` | The /shutdown preStop hook is recommended as part of the ingester scaledown process, but can be removed to optimize rolling restarts in instances that will never be scaled down or when using chunks storage with WAL disabled. https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down | +| ingester.​livenessProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. 
Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​nodeSelector | object | `{}` | | +| ingester.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Ingester data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| ingester.​persistentVolume.​annotations | object | `{}` | Ingester data Persistent Volume Claim annotations | +| ingester.​persistentVolume.​enabled | bool | `true` | If true and ingester.statefulSet.enabled is true, Ingester will create/use a Persistent Volume Claim If false, use emptyDir | +| ingester.​persistentVolume.​size | string | `"2Gi"` | Ingester data Persistent Volume size | +| ingester.​persistentVolume.​storageClass | string | `nil` | Ingester data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| ingester.​persistentVolume.​subPath | string | `""` | Subdirectory of Ingester data Persistent Volume to mount Useful if the volume's root directory is not empty | +| ingester.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ingester.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ingester.​podLabels | object | `{}` | Pod Labels | +| ingester.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ingester.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ingester.​replicas | int | `3` | | +| ingester.​resources | object | `{}` | | +| ingester.​securityContext | object | `{}` | | +| ingester.​service.​annotations | object | `{}` | | +| ingester.​service.​labels | object | `{}` | | +| ingester.​serviceAccount.​name | string | `nil` | | +| ingester.​serviceMonitor.​additionalLabels | object | `{}` | | +| ingester.​serviceMonitor.​enabled | bool | `false` | | +| ingester.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ingester.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ingester.​serviceMonitor.​relabelings | list | `[]` | | +| ingester.​startupProbe | object | `{}` | Startup/liveness probes for ingesters are not recommended. Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters | +| ingester.​statefulSet.​enabled | bool | `false` | If true, use a statefulset instead of a deployment for pod management. 
This is useful when using WAL | +| ingester.​statefulSet.​podManagementPolicy | string | `"OrderedReady"` | ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details | +| ingester.​statefulStrategy.​type | string | `"RollingUpdate"` | | +| ingester.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ingester.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ingester.​strategy.​type | string | `"RollingUpdate"` | | +| ingester.​terminationGracePeriodSeconds | int | `240` | | +| ingester.​tolerations | list | `[]` | | +| ingress.​annotations | object | `{}` | | +| ingress.​enabled | bool | `false` | | +| ingress.​hosts[0].​host | string | `"chart-example.local"` | | +| ingress.​hosts[0].​paths[0] | string | `"/"` | | +| ingress.​ingressClass.​enabled | bool | `false` | | +| ingress.​ingressClass.​name | string | `"nginx"` | | +| ingress.​tls | list | `[]` | | +| memcached | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | chunk caching for legacy chunk storage engine | +| memcached-blocks-index.​architecture | string | `"high-availability"` | | +| memcached-blocks-index.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-index.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-index.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is 
the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached-blocks-index.​metrics.​enabled | bool | `true` | | +| memcached-blocks-index.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-index.​replicaCount | int | `2` | | +| memcached-blocks-index.​resources | object | `{}` | | +| memcached-blocks-metadata.​architecture | string | `"high-availability"` | | +| memcached-blocks-metadata.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks-metadata.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks-metadata.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks-metadata.​metrics.​enabled | bool | `true` | | +| memcached-blocks-metadata.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks-metadata.​replicaCount | int | `2` | | +| memcached-blocks-metadata.​resources | object | `{}` | | +| memcached-blocks.​architecture | string | `"high-availability"` | | +| memcached-blocks.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-blocks.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-blocks.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-blocks.​metrics.​enabled | bool | `true` | | +| memcached-blocks.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-blocks.​replicaCount | int | `2` | | +| memcached-blocks.​resources | object | `{}` | | +| memcached-frontend.​architecture | string | `"high-availability"` | | +| memcached-frontend.​enabled | bool | `false` | | +| memcached-frontend.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-frontend.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-frontend.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-frontend.​metrics.​enabled | bool | `true` | | +| memcached-frontend.​metrics.​serviceMonitor.​enabled | bool | `false` | | +| memcached-frontend.​replicaCount | int | `2` | | +| memcached-frontend.​resources | object | `{}` | | +| memcached-index-read | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index read caching for legacy chunk storage engine | +| memcached-index-read.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-read.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-read.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| memcached-index-write | object | `{"architecture":"high-availability","enabled":false,"extraEnv":[{"name":"MEMCACHED_CACHE_SIZE","value":"1024"},{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"},{"name":"MEMCACHED_THREADS","value":"4"}],"metrics":{"enabled":true,"serviceMonitor":{"enabled":false}},"replicaCount":2,"resources":{}}` | index write caching for legacy chunk storage engine | +| memcached-index-write.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached-index-write.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached-index-write.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. | +| memcached.​extraEnv[0] | object | `{"name":"MEMCACHED_CACHE_SIZE","value":"1024"}` | MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage | +| memcached.​extraEnv[1] | object | `{"name":"MEMCACHED_MAX_CONNECTIONS","value":"1024"}` | MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service | +| memcached.​extraEnv[2] | object | `{"name":"MEMCACHED_THREADS","value":"4"}` | MEMCACHED_THREADS is the number of threads to use when processing incoming requests. By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
| +| nginx.​affinity | object | `{}` | | +| nginx.​annotations | object | `{}` | | +| nginx.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| nginx.​autoscaling.​enabled | bool | `false` | Creates a HorizontalPodAutoscaler for the nginx pods. | +| nginx.​autoscaling.​maxReplicas | int | `30` | | +| nginx.​autoscaling.​minReplicas | int | `2` | | +| nginx.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| nginx.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| nginx.​config.​auth_orgs | list | `[]` | (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config | +| nginx.​config.​basicAuthSecretName | string | `""` | (optional) Name of basic auth secret. In order to use this option, a secret with htpasswd formatted contents at the key ".htpasswd" must exist. For example: apiVersion: v1 kind: Secret metadata: name: my-secret namespace: stringData: .htpasswd: | user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ Please note that the use of basic auth will not identify organizations the way X-Scope-OrgID does. Thus, the use of basic auth alone will not prevent one tenant from viewing the metrics of another. To ensure tenants are scoped appropriately, explicitly set the `X-Scope-OrgID` header in the nginx config. 
Example setHeaders: X-Scope-OrgID: $remote_user | +| nginx.​config.​client_max_body_size | string | `"1M"` | ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size | +| nginx.​config.​dnsResolver | string | `"coredns.kube-system.svc.cluster.local"` | | +| nginx.​config.​httpSnippet | string | `""` | arbitrary snippet to inject in the http { } section of the nginx config | +| nginx.​config.​mainSnippet | string | `""` | arbitrary snippet to inject in the top section of the nginx config | +| nginx.​config.​serverSnippet | string | `""` | arbitrary snippet to inject in the server { } section of the nginx config | +| nginx.​config.​setHeaders | object | `{}` | | +| nginx.​containerSecurityContext.​enabled | bool | `true` | | +| nginx.​containerSecurityContext.​readOnlyRootFilesystem | bool | `false` | | +| nginx.​enabled | bool | `true` | | +| nginx.​env | list | `[]` | | +| nginx.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| nginx.​extraContainers | list | `[]` | | +| nginx.​extraPorts | list | `[]` | | +| nginx.​extraVolumeMounts | list | `[]` | | +| nginx.​extraVolumes | list | `[]` | | +| nginx.​http_listen_port | int | `80` | | +| nginx.​image.​pullPolicy | string | `"IfNotPresent"` | | +| nginx.​image.​repository | string | `"nginx"` | | +| nginx.​image.​tag | float | `1.21` | | +| nginx.​initContainers | list | `[]` | | +| nginx.​livenessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​nodeSelector | object | `{}` | | +| nginx.​persistentVolume.​subPath | string | `nil` | | +| nginx.​podAnnotations | object | `{}` | Pod Annotations | +| nginx.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| nginx.​podLabels | object | `{}` | Pod Labels | +| nginx.​readinessProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| 
nginx.​replicas | int | `2` | | +| nginx.​resources | object | `{}` | | +| nginx.​securityContext | object | `{}` | | +| nginx.​service.​annotations | object | `{}` | | +| nginx.​service.​labels | object | `{}` | | +| nginx.​service.​type | string | `"ClusterIP"` | | +| nginx.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| nginx.​startupProbe.​failureThreshold | int | `10` | | +| nginx.​startupProbe.​httpGet.​path | string | `"/healthz"` | | +| nginx.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| nginx.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| nginx.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| nginx.​strategy.​type | string | `"RollingUpdate"` | | +| nginx.​terminationGracePeriodSeconds | int | `10` | | +| nginx.​tolerations | list | `[]` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"querier"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| querier.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| querier.​annotations | object | `{}` | | +| querier.​autoscaling.​behavior | object | `{}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior | +| querier.​autoscaling.​enabled | bool | `false` | Creates 
a HorizontalPodAutoscaler for the querier pods. | +| querier.​autoscaling.​maxReplicas | int | `30` | | +| querier.​autoscaling.​minReplicas | int | `2` | | +| querier.​autoscaling.​targetCPUUtilizationPercentage | int | `80` | | +| querier.​autoscaling.​targetMemoryUtilizationPercentage | int | `0` | | +| querier.​containerSecurityContext.​enabled | bool | `true` | | +| querier.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| querier.​env | list | `[]` | | +| querier.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| querier.​extraContainers | list | `[]` | | +| querier.​extraPorts | list | `[]` | | +| querier.​extraVolumeMounts | list | `[]` | | +| querier.​extraVolumes | list | `[]` | | +| querier.​initContainers | list | `[]` | | +| querier.​lifecycle | object | `{}` | | +| querier.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​nodeSelector | object | `{}` | | +| querier.​persistentVolume.​subPath | string | `nil` | | +| querier.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| querier.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| querier.​podLabels | object | `{}` | Pod Labels | +| querier.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​replicas | int | `2` | | +| querier.​resources | object | `{}` | | +| querier.​securityContext | object | `{}` | | +| querier.​service.​annotations | object | `{}` | | +| querier.​service.​labels | object | `{}` | | +| querier.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| querier.​serviceMonitor.​additionalLabels | object | `{}` | | +| querier.​serviceMonitor.​enabled | bool | `false` | | +| 
querier.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| querier.​serviceMonitor.​metricRelabelings | list | `[]` | | +| querier.​serviceMonitor.​relabelings | list | `[]` | | +| querier.​startupProbe.​failureThreshold | int | `10` | | +| querier.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| querier.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| querier.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| querier.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| querier.​strategy.​type | string | `"RollingUpdate"` | | +| querier.​terminationGracePeriodSeconds | int | `180` | | +| querier.​tolerations | list | `[]` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"query-frontend"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| query_frontend.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` | | +| query_frontend.​annotations | object | `{}` | | +| query_frontend.​containerSecurityContext.​enabled | bool | `true` | | +| query_frontend.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| query_frontend.​env | list | `[]` | | +| query_frontend.​extraArgs | object | `{}` | Additional Cortex 
container arguments, e.g. log.level (debug, info, warn, error) | +| query_frontend.​extraContainers | list | `[]` | | +| query_frontend.​extraPorts | list | `[]` | | +| query_frontend.​extraVolumeMounts | list | `[]` | | +| query_frontend.​extraVolumes | list | `[]` | | +| query_frontend.​initContainers | list | `[]` | | +| query_frontend.​lifecycle | object | `{}` | | +| query_frontend.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​nodeSelector | object | `{}` | | +| query_frontend.​persistentVolume.​subPath | string | `nil` | | +| query_frontend.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| query_frontend.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| query_frontend.​podLabels | object | `{}` | Pod Labels | +| query_frontend.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​replicas | int | `2` | | +| query_frontend.​resources | object | `{}` | | +| query_frontend.​securityContext | object | `{}` | | +| query_frontend.​service.​annotations | object | `{}` | | +| query_frontend.​service.​labels | object | `{}` | | +| query_frontend.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| query_frontend.​serviceMonitor.​additionalLabels | object | `{}` | | +| query_frontend.​serviceMonitor.​enabled | bool | `false` | | +| query_frontend.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| query_frontend.​serviceMonitor.​metricRelabelings | list | `[]` | | +| query_frontend.​serviceMonitor.​relabelings | list | `[]` | | +| 
query_frontend.​startupProbe.​failureThreshold | int | `10` | | +| query_frontend.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| query_frontend.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| query_frontend.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| query_frontend.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| query_frontend.​strategy.​type | string | `"RollingUpdate"` | | +| query_frontend.​terminationGracePeriodSeconds | int | `180` | | +| query_frontend.​tolerations | list | `[]` | | +| ruler.​affinity | object | `{}` | | +| ruler.​annotations | object | `{}` | | +| ruler.​containerSecurityContext.​enabled | bool | `true` | | +| ruler.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| ruler.​directories | object | `{}` | allow configuring rules via configmap. ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html | +| ruler.​enabled | bool | `true` | | +| ruler.​env | list | `[]` | | +| ruler.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) | +| ruler.​extraContainers | list | `[]` | | +| ruler.​extraPorts | list | `[]` | | +| ruler.​extraVolumeMounts | list | `[]` | | +| ruler.​extraVolumes | list | `[]` | | +| ruler.​initContainers | list | `[]` | | +| ruler.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​nodeSelector | object | `{}` | | +| ruler.​persistentVolume.​subPath | string | `nil` | | +| ruler.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| ruler.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| ruler.​podLabels | object | `{}` | Pod Labels | +| ruler.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​replicas | int | `1` | | +| ruler.​resources | object | `{}` | | +| ruler.​securityContext | object | `{}` | | +| ruler.​service.​annotations | object | `{}` | | +| ruler.​service.​labels | object | `{}` | | +| ruler.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| ruler.​serviceMonitor.​additionalLabels | object | `{}` | | +| ruler.​serviceMonitor.​enabled | bool | `false` | | +| ruler.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| ruler.​serviceMonitor.​metricRelabelings | list | `[]` | | +| ruler.​serviceMonitor.​relabelings | list | `[]` | | +| ruler.​sidecar | object | 
`{"containerSecurityContext":{"enabled":true,"readOnlyRootFilesystem":true},"defaultFolderName":null,"enableUniqueFilenames":false,"enabled":false,"folder":"/tmp/rules","folderAnnotation":null,"image":{"repository":"quay.io/kiwigrid/k8s-sidecar","sha":"","tag":"1.10.7"},"imagePullPolicy":"IfNotPresent","label":"cortex_rules","labelValue":null,"resources":{},"searchNamespace":null,"watchMethod":null}` | Sidecars that collect the configmaps with the specified label and store the included files into the respective folders | +| ruler.​sidecar.​defaultFolderName | string | `nil` | The default folder name, it will create a subfolder under the `folder` and put rules in there instead | +| ruler.​sidecar.​folder | string | `"/tmp/rules"` | folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) | +| ruler.​sidecar.​folderAnnotation | string | `nil` | If specified, the sidecar will look for annotation with this name to create a folder and put rules there. You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure. | +| ruler.​sidecar.​label | string | `"cortex_rules"` | label that the configmaps with rules are marked with | +| ruler.​sidecar.​labelValue | string | `nil` | value of label that the configmaps with rules are set to | +| ruler.​sidecar.​searchNamespace | string | `nil` | If specified, the sidecar will search for rules config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | +| ruler.​startupProbe.​failureThreshold | int | `10` | | +| ruler.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| ruler.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| ruler.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| ruler.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| ruler.​strategy.​type | string | `"RollingUpdate"` | | +| ruler.​terminationGracePeriodSeconds | int | `180` | | +| ruler.​tolerations | list | `[]` | | +| runtimeconfigmap.​annotations | object | `{}` | | +| runtimeconfigmap.​create | bool | `true` | If true, a configmap for the `runtime_config` will be created. If false, the configmap _must_ exist already on the cluster or pods will fail to create. | +| runtimeconfigmap.​runtime_config | object | `{}` | https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file | +| serviceAccount.​annotations | object | `{}` | | +| serviceAccount.​automountServiceAccountToken | bool | `true` | | +| serviceAccount.​create | bool | `true` | | +| serviceAccount.​name | string | `nil` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​key | string | `"app.kubernetes.io/component"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​operator | string | `"In"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​labelSelector.​matchExpressions[0].​values[0] | string | `"store-gateway"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​podAffinityTerm.​topologyKey | string | `"kubernetes.io/hostname"` | | +| store_gateway.​affinity.​podAntiAffinity.​preferredDuringSchedulingIgnoredDuringExecution[0].​weight | int | `100` 
| | +| store_gateway.​annotations | object | `{}` | | +| store_gateway.​containerSecurityContext.​enabled | bool | `true` | | +| store_gateway.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| store_gateway.​env | list | `[]` | | +| store_gateway.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| store_gateway.​extraContainers | list | `[]` | | +| store_gateway.​extraPorts | list | `[]` | | +| store_gateway.​extraVolumeMounts | list | `[]` | | +| store_gateway.​extraVolumes | list | `[]` | | +| store_gateway.​initContainers | list | `[]` | | +| store_gateway.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​livenessProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​nodeSelector | object | `{}` | | +| store_gateway.​persistentVolume.​accessModes | list | `["ReadWriteOnce"]` | Store-gateway data Persistent Volume access modes Must match those of existing PV or dynamic provisioner Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ | +| store_gateway.​persistentVolume.​annotations | object | `{}` | Store-gateway data Persistent Volume Claim annotations | +| store_gateway.​persistentVolume.​enabled | bool | `true` | If true Store-gateway will create/use a Persistent Volume Claim If false, use emptyDir | +| store_gateway.​persistentVolume.​size | string | `"2Gi"` | Store-gateway data Persistent Volume size | +| store_gateway.​persistentVolume.​storageClass | string | `nil` | Store-gateway data Persistent Volume Storage Class If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
| +| store_gateway.​persistentVolume.​subPath | string | `""` | Subdirectory of Store-gateway data Persistent Volume to mount Useful if the volume's root directory is not empty | +| store_gateway.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| store_gateway.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| store_gateway.​podLabels | object | `{}` | Pod Labels | +| store_gateway.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​replicas | int | `1` | | +| store_gateway.​resources | object | `{}` | | +| store_gateway.​securityContext | object | `{}` | | +| store_gateway.​service.​annotations | object | `{}` | | +| store_gateway.​service.​labels | object | `{}` | | +| store_gateway.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| store_gateway.​serviceMonitor.​additionalLabels | object | `{}` | | +| store_gateway.​serviceMonitor.​enabled | bool | `false` | | +| store_gateway.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| store_gateway.​serviceMonitor.​metricRelabelings | list | `[]` | | +| store_gateway.​serviceMonitor.​relabelings | list | `[]` | | +| store_gateway.​startupProbe.​failureThreshold | int | `60` | | +| store_gateway.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| store_gateway.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| store_gateway.​startupProbe.​httpGet.​scheme | string | `"HTTP"` | | +| store_gateway.​startupProbe.​initialDelaySeconds | int | `120` | | +| store_gateway.​startupProbe.​periodSeconds | int | `30` | | +| store_gateway.​strategy.​type | string | `"RollingUpdate"` | | +| 
store_gateway.​terminationGracePeriodSeconds | int | `240` | | +| store_gateway.​tolerations | list | `[]` | | +| table_manager.​affinity | object | `{}` | | +| table_manager.​annotations | object | `{}` | | +| table_manager.​containerSecurityContext.​enabled | bool | `true` | | +| table_manager.​containerSecurityContext.​readOnlyRootFilesystem | bool | `true` | | +| table_manager.​env | list | `[]` | | +| table_manager.​extraArgs | object | `{}` | Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) | +| table_manager.​extraContainers | list | `[]` | | +| table_manager.​extraPorts | list | `[]` | | +| table_manager.​extraVolumeMounts | list | `[]` | | +| table_manager.​extraVolumes | list | `[]` | | +| table_manager.​initContainers | list | `[]` | | +| table_manager.​livenessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​livenessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​nodeSelector | object | `{}` | | +| table_manager.​persistentVolume.​subPath | string | `nil` | | +| table_manager.​podAnnotations | object | `{"prometheus.io/port":"8080","prometheus.io/scrape":"true"}` | Pod Annotations | +| table_manager.​podDisruptionBudget.​maxUnavailable | int | `1` | | +| table_manager.​podLabels | object | `{}` | Pod Labels | +| table_manager.​readinessProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​readinessProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​replicas | int | `1` | | +| table_manager.​resources | object | `{}` | | +| table_manager.​securityContext | object | `{}` | | +| table_manager.​service.​annotations | object | `{}` | | +| table_manager.​service.​labels | object | `{}` | | +| table_manager.​serviceAccount.​name | string | `""` | "" disables the individual serviceAccount and uses the global serviceAccount for that component | +| table_manager.​serviceMonitor.​additionalLabels | object | `{}` | | +| table_manager.​serviceMonitor.​enabled | bool 
| `false` | | +| table_manager.​serviceMonitor.​extraEndpointSpec | object | `{}` | Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| table_manager.​serviceMonitor.​metricRelabelings | list | `[]` | | +| table_manager.​serviceMonitor.​relabelings | list | `[]` | | +| table_manager.​startupProbe.​failureThreshold | int | `10` | | +| table_manager.​startupProbe.​httpGet.​path | string | `"/ready"` | | +| table_manager.​startupProbe.​httpGet.​port | string | `"http-metrics"` | | +| table_manager.​strategy.​rollingUpdate.​maxSurge | int | `0` | | +| table_manager.​strategy.​rollingUpdate.​maxUnavailable | int | `1` | | +| table_manager.​strategy.​type | string | `"RollingUpdate"` | | +| table_manager.​terminationGracePeriodSeconds | int | `180` | | +| table_manager.​tolerations | list | `[]` | | +| tags.​blocks-storage-memcached | bool | `false` | Set to true to enable block storage memcached caching | +| useConfigMap | bool | `false` | | +| useExternalConfig | bool | `false` | | + diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/NOTES.txt b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/NOTES.txt new file mode 100644 index 0000000..1bd3203 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/NOTES.txt @@ -0,0 +1,9 @@ +{{- if eq .Values.config.storage.engine "chunks" }} +Cortex chunks storage has been deprecated, and it's now in maintenance mode: all Cortex users are encouraged to migrate to the blocks storage. +No new features will be added to the chunks storage. +Unlike the official cortex default configuration this helm-chart does not run the chunk engine by default. +{{- end }} + +Verify the application is working by running these commands: + kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ include "cortex.querierFullname" . 
}} {{ .Values.config.server.http_listen_port }} + curl http://127.0.0.1:{{ .Values.config.server.http_listen_port }}/services diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/_helpers.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/_helpers.tpl new file mode 100644 index 0000000..81914c9 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/_helpers.tpl @@ -0,0 +1,155 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cortex.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cortex.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cortex.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "cortex.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cortex.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the app name of cortex clients. Defaults to the same logic as "cortex.fullname", and default client expects "prometheus". 
+*/}} +{{- define "client.name" -}} +{{- if .Values.client.name -}} +{{- .Values.client.name -}} +{{- else if .Values.client.fullnameOverride -}} +{{- .Values.client.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default "prometheus" .Values.client.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "cortex.labels" -}} +helm.sh/chart: {{ include "cortex.chart" . }} +{{ include "cortex.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "cortex.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cortex.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create configuration parameters for memcached configuration +*/}} +{{- define "cortex.memcached" -}} +{{- if and (eq .Values.config.storage.engine "blocks") (index .Values "tags" "blocks-storage-memcached") }} +- "-blocks-storage.bucket-store.index-cache.backend=memcached" +- "-blocks-storage.bucket-store.index-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-index.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.chunks-cache.backend=memcached" +- "-blocks-storage.bucket-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +- "-blocks-storage.bucket-store.metadata-cache.backend=memcached" +- "-blocks-storage.bucket-store.metadata-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached-blocks-metadata.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne 
.Values.config.storage.engine "blocks") .Values.memcached.enabled }} +- "-store.chunks-cache.memcached.addresses=dns+{{ .Release.Name }}-memcached.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-read" "enabled") }} +- "-store.index-cache-read.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-read.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- if and (ne .Values.config.storage.engine "blocks") (index .Values "memcached-index-write" "enabled") }} +- "-store.index-cache-write.memcached.addresses=dns+{{ .Release.Name }}-memcached-index-write.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Create configuration for frontend memcached configuration +*/}} +{{- define "cortex.frontend-memcached" -}} +{{- if index .Values "memcached-frontend" "enabled" }} +- "-frontend.memcached.addresses=dns+{{ template "cortex.fullname" . }}-memcached-frontend.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:11211" +{{- end -}} +{{- end -}} + +{{/* +Determine the policy api version +*/}} +{{- define "cortex.pdbVersion" -}} +{{- if or (.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget") (semverCompare ">=1.21" .Capabilities.KubeVersion.Version) -}} +policy/v1 +{{- else -}} +policy/v1beta1 +{{- end -}} +{{- end -}} + +{{/* +Get checksum of config secret or configMap +*/}} +{{- define "cortex.configChecksum" -}} +{{- if .Values.useExternalConfig -}} +{{- .Values.externalConfigVersion -}} +{{- else if .Values.useConfigMap -}} +{{- include (print $.Template.BasePath "/configmap.yaml") . | sha256sum -}} +{{- else -}} +{{- include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum -}} +{{- end -}} +{{- end -}} + +{{/* +Get volume of config secret or configMap +*/}} +{{- define "cortex.configVolume" -}} +- name: config + {{- if .Values.useExternalConfig }} + secret: + secretName: {{ .Values.externalConfigSecretName }} + {{- else if .Values.useConfigMap }} + configMap: + name: {{ template "cortex.fullname" . }}-config + {{- else }} + secret: + secretName: {{ template "cortex.fullname" . }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml new file mode 100644 index 0000000..49c4ca7 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-dep.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alertmanager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + name: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - name: alertmanager +# image: quay.io/cortexproject/cortex:v1.9.0 +# image: registry.cloud.intermax:5000/library/cortex:v1.11.0 + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cortex:v1.11.0 + imagePullPolicy: IfNotPresent + args: + - -target=alertmanager +# - -log.level=debug + - -server.http-listen-port=80 + - -alertmanager.configs.url=http://{{ template "cortex.fullname" . 
}}-configs:8080 + - -alertmanager.web.external-url=/alertmanager + ports: + - containerPort: 80 diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 0000000..989feb2 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: alertmanager +spec: + ports: + - port: 80 + selector: + name: alertmanager diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml new file mode 100644 index 0000000..cf7f25a --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrole.yaml @@ -0,0 +1,12 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "cortex.fullname" . }}-clusterrole + labels: + {{- include "cortex.labels" . | nindent 4 }} +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..c1d9884 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.ruler.sidecar.enabled .Values.alertmanager.sidecar.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cortex.fullname" . }}-clusterrolebinding + labels: + {{- include "cortex.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cortex.fullname" . }}-clusterrole +subjects: + - kind: ServiceAccount + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl new file mode 100644 index 0000000..f89b33c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/_helpers-compactor.tpl @@ -0,0 +1,23 @@ + +{{/* +compactor fullname +*/}} +{{- define "cortex.compactorFullname" -}} +{{ include "cortex.fullname" . }}-compactor +{{- end }} + +{{/* +compactor common labels +*/}} +{{- define "cortex.compactorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: compactor +{{- end }} + +{{/* +compactor selector labels +*/}} +{{- define "cortex.compactorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: compactor +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml new file mode 100644 index 0000000..8634e4c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.compactor.replicas) 1) (.Values.compactor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.compactor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml new file mode 100644 index 0000000..a33e849 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.compactor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + {{- if .Values.compactor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.compactor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.compactor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.compactor.serviceMonitor.interval }} + interval: {{ .Values.compactor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.compactor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.compactor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.compactor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.compactor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.compactor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 0000000..c0a1baf --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,141 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.compactor.annotations | nindent 4 }} +spec: + replicas: {{ .Values.compactor.replicas }} + selector: + matchLabels: + {{- include "cortex.compactorSelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.compactor.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . }}-compactor + {{- if .Values.compactor.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.compactor.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.compactor.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.compactor.persistentVolume.storageClass }} + {{- if (eq "-" .Values.compactor.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.compactor.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.compactor.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.compactor.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.compactorLabels" . 
| nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.compactor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.compactor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.compactor.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.compactor.priorityClassName }} + priorityClassName: {{ .Values.compactor.priorityClassName }} + {{- end }} + {{- if .Values.compactor.securityContext.enabled }} + securityContext: {{- omit .Values.compactor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.compactor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.compactor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.compactor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.compactor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.compactor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if not .Values.compactor.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.compactor.extraVolumes }} + {{- toYaml .Values.compactor.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.compactor.extraContainers }} + {{ toYaml .Values.compactor.extraContainers | nindent 8 }} + {{- end }} + - name: compactor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=compactor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.compactor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.compactor.extraVolumeMounts }} + {{- toYaml .Values.compactor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.compactor.persistentVolume.subPath }} + subPath: {{ .Values.compactor.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.compactor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.compactor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.compactor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.compactor.resources | nindent 12 }} + {{- if .Values.compactor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.compactor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.compactor.env }} + env: + {{- toYaml .Values.compactor.env | nindent 12 }} + {{- end }} +{{- end -}} +{{- end -}} 
diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml new file mode 100644 index 0000000..ae20f78 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/compactor/compactor-svc.yaml @@ -0,0 +1,25 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +{{- if .Values.compactor.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.compactorLabels" . | nindent 4 }} + {{- with .Values.compactor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.compactor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.compactorSelectorLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configmap.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configmap.yaml new file mode 100644 index 0000000..001b13a --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configmap.yaml @@ -0,0 +1,12 @@ +{{- if (and (not .Values.useExternalConfig) (.Values.useConfigMap)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: | + {{- tpl (toYaml .Values.config) . 
| nindent 4 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl new file mode 100644 index 0000000..c8945dc --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/_helpers-configs.tpl @@ -0,0 +1,23 @@ + +{{/* +configs fullname +*/}} +{{- define "cortex.configsFullname" -}} +{{ include "cortex.fullname" . }}-configs +{{- end }} + +{{/* +configs common labels +*/}} +{{- define "cortex.configsLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: configs +{{- end }} + +{{/* +configs selector labels +*/}} +{{- define "cortex.configsSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: configs +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml new file mode 100644 index 0000000..86048ce --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-dep.yaml @@ -0,0 +1,124 @@ +{{- if .Values.configs.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.configs.annotations | nindent 4 }} +spec: + replicas: {{ .Values.configs.replicas }} + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.configs.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.configsLabels" . | nindent 8 }} + {{- with .Values.configs.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . 
}} + {{- with .Values.configs.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.configs.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.configs.priorityClassName }} + priorityClassName: {{ .Values.configs.priorityClassName }} + {{- end }} + {{- if .Values.configs.securityContext.enabled }} + securityContext: {{- omit .Values.configs.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.configs.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: configs + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=configs" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configsdb_postgresql.enabled }} + - "-configs.database.uri={{ .Values.configsdb_postgresql.uri }}" + - "-configs.database.password-file=/etc/postgresql/password" + - "-configs.database.migrations-dir=/migrations" + {{- else }} + - "-configs.database.uri=memory://" + {{- end }} + {{- range $key, $value := .Values.configs.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/cortex + subPath: {{ .Values.configs.persistentVolume.subPath }} + - name: runtime-config + mountPath: /etc/cortex-runtime-config + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + mountPath: /etc/postgresql + {{- end }} + {{- if .Values.configs.extraVolumeMounts }} + {{- toYaml .Values.configs.extraVolumeMounts | nindent 12}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + 
{{- toYaml .Values.configs.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.configs.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.configs.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.configs.resources | nindent 12 }} + {{- if .Values.configs.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.configs.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.configs.env }} + env: + {{- toYaml .Values.configs.env | nindent 12 }} + {{- end }} + {{- if .Values.configs.extraContainers }} + {{- toYaml .Values.configs.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.configs.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.configs.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.configs.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.configs.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + {{- if .Values.configsdb_postgresql.enabled }} + - name: postgres-password + secret: + secretName: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.name }}{{ else }}{{ template "cortex.fullname" . }}-postgresql{{ end }} + items: + - key: {{ if .Values.configsdb_postgresql.auth.existing_secret.name }}{{ .Values.configsdb_postgresql.auth.existing_secret.key }}{{ else }}postgresql-password{{ end }} + path: password + {{- end }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.configs.extraVolumes }} + {{- toYaml .Values.configs.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml new file mode 100644 index 0000000..b6e46b4 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.configs.replicas) 1) (.Values.configs.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.configs.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml new file mode 100644 index 0000000..393bc32 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.configs.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . 
| nindent 4 }} + {{- if .Values.configs.serviceMonitor.additionalLabels }} +{{ toYaml .Values.configs.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.configs.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.configsSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.configs.serviceMonitor.interval }} + interval: {{ .Values.configs.serviceMonitor.interval }} + {{- end }} + {{- if .Values.configs.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.configs.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.configs.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.configs.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.configs.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.configs.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.configs.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml new file mode 100644 index 0000000..6dbc2cd --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/configs/configs-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.configs.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.configsFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.configsLabels" . | nindent 4 }} + {{- with .Values.configs.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.configs.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.configsSelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml new file mode 100644 index 0000000..472f83e --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/cortex-pv.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-0 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH1 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-1 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ .Values.global.IMXC_INGESTER_PV_PATH2 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ingester-pv-2 +spec: + capacity: + storage: 2Gi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ .Values.global.DEFAULT_STORAGE_CLASS }} + local: + path: {{ 
.Values.global.IMXC_INGESTER_PV_PATH3 }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl new file mode 100644 index 0000000..24e8d00 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/_helpers-distributor.tpl @@ -0,0 +1,23 @@ + +{{/* +distributor fullname +*/}} +{{- define "cortex.distributorFullname" -}} +{{ include "cortex.fullname" . }}-distributor +{{- end }} + +{{/* +distributor common labels +*/}} +{{- define "cortex.distributorLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: distributor +{{- end }} + +{{/* +distributor selector labels +*/}} +{{- define "cortex.distributorSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: distributor +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml new file mode 100644 index 0000000..fc9c0ba --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-dep.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.distributor.annotations | nindent 4 }} +spec: + {{- if not .Values.distributor.autoscaling.enabled }} + replicas: {{ .Values.distributor.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.distributor.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.distributorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.distributor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.distributor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.distributor.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.distributor.priorityClassName }} + priorityClassName: {{ .Values.distributor.priorityClassName }} + {{- end }} + {{- if .Values.distributor.securityContext.enabled }} + securityContext: {{- omit .Values.distributor.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.distributor.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: distributor + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=distributor" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.distributor.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.distributor.extraVolumeMounts }} + {{- toYaml .Values.distributor.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.distributor.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.distributor.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.distributor.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.distributor.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.distributor.resources | nindent 12 }} + {{- if .Values.distributor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.distributor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.distributor.env }} + env: + {{- toYaml .Values.distributor.env | nindent 12 }} + {{- end }} + {{- with .Values.distributor.lifecycle }} + lifecycle: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- /* NOTE(review): removed duplicate 'resources' key that followed here; */}} + {{- /* set requests.cpu (e.g. 100m) via .Values.distributor.resources, rendered above — */}} + {{- /* duplicate mapping keys are invalid YAML (three comment lines preserve the +121 hunk count) */}} + {{- if .Values.distributor.extraContainers }} + {{- toYaml .Values.distributor.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.distributor.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.distributor.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.distributor.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.distributor.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.distributor.extraVolumes }} + {{- toYaml .Values.distributor.extraVolumes | nindent 8}} + {{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml new file mode 100644 index 0000000..0c1c9f6 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.distributor.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.distributorFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.distributorFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .
}} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml new file mode 100644 index 0000000..7b05701 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.distributor.replicas) 1) (.Values.distributor.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.distributor.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml new file mode 100644 index 0000000..5db8389 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.distributor.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . 
| nindent 4 }} + {{- if .Values.distributor.serviceMonitor.additionalLabels }} +{{ toYaml .Values.distributor.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.distributor.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.distributorSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.distributor.serviceMonitor.interval }} + interval: {{ .Values.distributor.serviceMonitor.interval }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.distributor.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.distributor.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.distributor.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.distributor.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.distributor.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 0000000..1c4f7f6 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.distributorSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml new file mode 100644 index 0000000..2db7197 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/distributor/distributor-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.distributorLabels" . | nindent 4 }} + {{- with .Values.distributor.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.distributor.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.distributorSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl new file mode 100644 index 0000000..4705327 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/_helpers-ingester.tpl @@ -0,0 +1,23 @@ + +{{/* +ingester fullname +*/}} +{{- define "cortex.ingesterFullname" -}} +{{ include "cortex.fullname" . }}-ingester +{{- end }} + +{{/* +ingester common labels +*/}} +{{- define "cortex.ingesterLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: ingester +{{- end }} + +{{/* +ingester selector labels +*/}} +{{- define "cortex.ingesterSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ingester +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml new file mode 100644 index 0000000..b26d3a3 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-dep.yaml @@ -0,0 +1,130 @@ +{{- if not .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.ingester.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.ingester.env }} + {{ toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- /* NOTE(review): removed duplicate 'resources' key that followed here; */}} + {{- /* set requests.cpu (e.g. 100m) via .Values.ingester.resources, rendered above — */}} + {{- /* duplicate mapping keys are invalid YAML (three comment lines preserve the +130 hunk count) */}} + {{- with .Values.ingester.extraContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" .
}}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml new file mode 100644 index 0000000..97c5290 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-hpa.yaml @@ -0,0 +1,29 @@ +{{- with .Values.ingester.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.ingesterFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ if $.Values.ingester.statefulSet.enabled }}StatefulSet{{ else }}Deployment{{ end }} + name: {{ include "cortex.ingesterFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .targetMemoryUtilizationPercentage }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml new file mode 100644 index 0000000..a47ecb4 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ingester.replicas) 1) (.Values.ingester.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.ingesterFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.ingester.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml new file mode 100644 index 0000000..310ca54 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingester.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- if .Values.ingester.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ingester.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ingester.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ingester.serviceMonitor.interval }} + interval: {{ .Values.ingester.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ingester.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ingester.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ingester.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ingester.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml new file mode 100644 index 0000000..8016441 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,153 @@ +{{- if .Values.ingester.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +spec: + {{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.ingesterSelectorLabels" . 
| nindent 6 }} + updateStrategy: + {{- toYaml .Values.ingester.statefulStrategy | nindent 4 }} + podManagementPolicy: "{{ .Values.ingester.statefulSet.podManagementPolicy }}" + serviceName: {{ template "cortex.fullname" . }}-ingester-headless + {{- if .Values.ingester.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.ingester.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.ingester.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.ingester.persistentVolume.storageClass }} + {{- if (eq "-" .Values.ingester.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.ingester.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{ toYaml .Values.ingester.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.ingester.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.ingesterLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ingester.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ingester.priorityClassName }} + priorityClassName: {{ .Values.ingester.priorityClassName }} + {{- end }} + {{- if .Values.ingester.securityContext.enabled }} + securityContext: {{- omit .Values.ingester.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ingester.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.ingester.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ingester.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ingester.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.ingester.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.ingester.extraVolumes }} + {{- toYaml .Values.ingester.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8 }} + {{- end }} + - name: ingester + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=ingester" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ingester.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ingester.extraVolumeMounts }} + {{- toYaml .Values.ingester.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- with .Values.ingester.persistentVolume.subPath }} + subPath: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + {{- if .Values.ingester.startupProbe }} + startupProbe: + {{- toYaml .Values.ingester.startupProbe | nindent 12 }} + {{- end }} + {{- if .Values.ingester.livenessProbe }} + livenessProbe: + {{- toYaml .Values.ingester.livenessProbe | nindent 12 }} + {{- end }} + readinessProbe: + {{- toYaml .Values.ingester.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ingester.resources | nindent 12 }} + {{- if .Values.ingester.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingester.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ingester.env }} + env: + {{- toYaml .Values.ingester.env | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 0000000..b783caa --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml new file mode 100644 index 0000000..02183ae --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ingester/ingester-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.ingesterSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl new file mode 100644 index 0000000..61d8b78 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/_helpers-nginx.tpl @@ -0,0 +1,23 @@ + +{{/* +nginx fullname +*/}} +{{- define "cortex.nginxFullname" -}} +{{ include "cortex.fullname" . }}-nginx +{{- end }} + +{{/* +nginx common labels +*/}} +{{- define "cortex.nginxLabels" -}} +{{ include "cortex.labels" . 
}} +app.kubernetes.io/component: nginx +{{- end }} + +{{/* +nginx selector labels +*/}} +{{- define "cortex.nginxSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: nginx +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml new file mode 100644 index 0000000..fd3474d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-config.yaml @@ -0,0 +1,140 @@ +{{- if .Values.nginx.enabled }} +{{- $rootDomain := printf "%s.svc.%s:%d" .Release.Namespace .Values.clusterDomain (.Values.config.server.http_listen_port | int) }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +data: + nginx.conf: |- + worker_processes 5; ## Default: 1 + error_log /dev/stderr; + pid /tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + {{- with .Values.nginx.config.mainSnippet }} + {{ tpl . $ | nindent 4 }} + {{- end }} + + http { + default_type application/octet-stream; + client_max_body_size {{.Values.nginx.config.client_max_body_size}}; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" $http_x_scope_orgid'; + access_log /dev/stderr main; + sendfile on; + tcp_nopush on; + resolver {{ default (printf "coredns.kube-system.svc.%s" .Values.clusterDomain ) .Values.nginx.config.dnsResolver }}; + + {{- with .Values.nginx.config.httpSnippet }} + {{ tpl . 
$ | nindent 6 }} + {{- end }} + + server { # simple reverse-proxy + listen {{ .Values.nginx.http_listen_port }}; + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + proxy_http_version 1.1; + proxy_set_header X-Scope-OrgID 0; + + {{- range $key, $value := .Values.nginx.config.setHeaders }} + proxy_set_header {{ $key }} {{ $value }}; + {{- end }} + + {{ if .Values.nginx.config.basicAuthSecretName -}} + auth_basic "Restricted Content"; + auth_basic_user_file /etc/apache2/.htpasswd; + {{- end }} + + {{- with .Values.nginx.config.serverSnippet }} + {{ tpl . $ | nindent 8 }} + {{- end }} + + location = /healthz { + # auth_basic off is not set here, even when a basic auth directive is + # included in the server block, as Nginx's NGX_HTTP_REWRITE_PHASE + # (point when this return statement is evaluated) comes before the + # NGX_HTTP_ACCESS_PHASE (point when basic auth is evaluated). Thus, + # this return statement returns a response before basic auth is + # evaluated. + return 200 'alive'; + } + + # Distributor Config + location = /ring { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /all_user_stats { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + location = /api/prom/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + ## New Remote write API. Ref: https://cortexmetrics.io/docs/api/#remote-write + location = /api/v1/push { + proxy_pass http://{{ template "cortex.fullname" . }}-distributor.{{ $rootDomain }}$request_uri; + } + + # Alertmanager Config + location ~ /api/prom/alertmanager/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /api/v1/alerts { + proxy_pass http://{{ template "cortex.fullname" . 
}}-alertmanager.{{ $rootDomain }}$request_uri; + } + + location ~ /multitenant_alertmanager/status { + proxy_pass http://{{ template "cortex.fullname" . }}-alertmanager.{{ $rootDomain }}$request_uri; + } + + # Ruler Config + location ~ /api/v1/rules { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + location ~ /ruler/ring { + proxy_pass http://{{ template "cortex.fullname" . }}-ruler.{{ $rootDomain }}$request_uri; + } + + # Config Config + location ~ /api/prom/configs/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-configs.{{ $rootDomain }}$request_uri; + } + + # Query Config + location ~ /api/prom/.* { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + ## New Query frontend APIs as per https://cortexmetrics.io/docs/api/#querier--query-frontend + location ~ ^{{.Values.config.api.prometheus_http_prefix}}/api/v1/(read|metadata|labels|series|query_range|query) { + proxy_pass http://{{ template "cortex.fullname" . }}-query-frontend.{{ $rootDomain }}$request_uri; + } + + location ~ {{.Values.config.api.prometheus_http_prefix}}/api/v1/label/.* { + proxy_pass http://{{ template "cortex.fullname" . 
}}-query-frontend.{{ $rootDomain }}$request_uri; + } + {{- if and (.Values.config.auth_enabled) (.Values.nginx.config.auth_orgs) }} + # Auth orgs + {{- range $org := compact .Values.nginx.config.auth_orgs | uniq }} + location = /api/v1/push/{{ $org }} { + proxy_set_header X-Scope-OrgID {{ $org }}; + proxy_pass http://{{ template "cortex.fullname" $ }}-distributor.{{ $rootDomain }}/api/v1/push; + } + {{- end }} + {{- end }} + } + } +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml new file mode 100644 index 0000000..bbd3a9d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-dep.yaml @@ -0,0 +1,111 @@ +{{- if .Values.nginx.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.nginx.annotations | nindent 4 }} +spec: + {{- if not .Values.nginx.autoscaling.enabled }} + replicas: {{ .Values.nginx.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.nginx.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.nginxLabels" . | nindent 8 }} + {{- with .Values.nginx.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/nginx/nginx-config.yaml") . | sha256sum }} + {{- with .Values.nginx.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.nginx.serviceAccount.name | default (include "cortex.serviceAccountName" . 
) }} + {{- if .Values.nginx.priorityClassName }} + priorityClassName: {{ .Values.nginx.priorityClassName }} + {{- end }} + {{- if .Values.nginx.securityContext.enabled }} + securityContext: {{- omit .Values.nginx.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.nginx.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: nginx + image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + {{- if .Values.nginx.extraArgs }} + args: + {{- range $key, $value := .Values.nginx.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.nginx.extraVolumeMounts }} + {{- toYaml .Values.nginx.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + mountPath: /etc/apache2 + readOnly: true + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.nginx.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.nginx.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.nginx.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.nginx.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.nginx.resources | nindent 12 }} + {{- if .Values.nginx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.nginx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.nginx.env }} + env: + {{- toYaml .Values.nginx.env | nindent 12 }} + {{- end }} + {{- if .Values.nginx.extraContainers }} + {{ toYaml .Values.nginx.extraContainers | indent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.nginx.nodeSelector | nindent 8 }} + affinity: + {{- toYaml 
.Values.nginx.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.nginx.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.nginx.terminationGracePeriodSeconds }} + volumes: + - name: config + configMap: + name: {{ template "cortex.fullname" . }}-nginx + {{- if .Values.nginx.config.basicAuthSecretName }} + - name: htpasswd + secret: + defaultMode: 420 + secretName: {{ .Values.nginx.config.basicAuthSecretName }} + {{- end }} + {{- if .Values.nginx.extraVolumes }} + {{- toYaml .Values.nginx.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml new file mode 100644 index 0000000..b93a13d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-hpa.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.nginx.enabled .Values.nginx.autoscaling.enabled }} +{{- with .Values.nginx.autoscaling -}} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.nginxFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.nginxFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml new file mode 100644 index 0000000..51e6609 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-ingress.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.ingress.enabled .Values.nginx.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.ingress.annotations | nindent 4 }} +spec: +{{- if .Values.ingress.ingressClass.enabled }} + ingressClassName: {{ .Values.ingress.ingressClass.name }} +{{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + pathType: "Prefix" + backend: + service: + name: {{ include "cortex.nginxFullname" $ }} + port: + number: {{ $.Values.nginx.http_listen_port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml new file mode 100644 index 0000000..959764a --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (.Values.nginx.enabled) (gt (int .Values.nginx.replicas) 1) (.Values.nginx.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.nginxSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.nginx.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml new file mode 100644 index 0000000..72a2c44 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/nginx/nginx-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.nginx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.nginxFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.nginxLabels" . | nindent 4 }} + {{- with .Values.nginx.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.nginx.service.annotations | nindent 4 }} +spec: + type: {{ .Values.nginx.service.type }} + ports: + - port: {{ .Values.nginx.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.nginxSelectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml new file mode 100644 index 0000000..7bb3983 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/node-exporter.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: 'true' + labels: + app: node-exporter + name: node-exporter + name: node-exporter + namespace: imxc +spec: + clusterIP: None + ports: + - name: scrape + port: 9100 + protocol: TCP + selector: + app: node-exporter + type: ClusterIP +--- +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: DaemonSet +metadata: + name: node-exporter + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: node-exporter +{{- end }} + template: + metadata: + labels: + app: node-exporter + name: node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/node-exporter + name: node-exporter + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + ports: + - containerPort: 9100 + hostPort: 9100 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + 
hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl new file mode 100644 index 0000000..c0a6204 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/_helpers-querier.tpl @@ -0,0 +1,23 @@ + +{{/* +querier fullname +*/}} +{{- define "cortex.querierFullname" -}} +{{ include "cortex.fullname" . }}-querier +{{- end }} + +{{/* +querier common labels +*/}} +{{- define "cortex.querierLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: querier +{{- end }} + +{{/* +querier selector labels +*/}} +{{- define "cortex.querierSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: querier +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml new file mode 100644 index 0000000..a84ba8a --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-dep.yaml @@ -0,0 +1,115 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.querier.annotations | nindent 4 }} +spec: + {{- if not .Values.querier.autoscaling.enabled }} + replicas: {{ .Values.querier.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . 
| nindent 6 }} + strategy: + {{- toYaml .Values.querier.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.querierLabels" . | nindent 8 }} + {{- with .Values.querier.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.querier.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.querier.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.querier.priorityClassName }} + priorityClassName: {{ .Values.querier.priorityClassName }} + {{- end }} + {{- if .Values.querier.securityContext.enabled }} + securityContext: {{- omit .Values.querier.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.querier.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: querier + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=querier" + - "-config.file=/etc/cortex/cortex.yaml" + - "-querier.frontend-address={{ template "cortex.fullname" . }}-query-frontend-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.grpc_listen_port }}" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.querier.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.querier.extraVolumeMounts }} + {{- toYaml .Values.querier.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.querier.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.querier.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.querier.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.querier.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.querier.resources | nindent 12 }} + {{- if .Values.querier.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.querier.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- if .Values.querier.env }} + {{- toYaml .Values.querier.env | nindent 12 }} + {{- end }} + {{- with .Values.querier.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + requests: + cpu: "100m" + {{- if .Values.querier.extraContainers }} + {{- toYaml .Values.querier.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.querier.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.querier.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.querier.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.querier.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.querier.extraVolumes }} + {{- toYaml .Values.querier.extraVolumes | nindent 8}} + {{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml new file mode 100644 index 0000000..f078526 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-hpa.yaml @@ -0,0 +1,39 @@ +{{- with .Values.querier.autoscaling -}} +{{- if .enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "cortex.querierFullname" $ }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.querierLabels" $ | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "cortex.querierFullname" $ }} + minReplicas: {{ .minReplicas }} + maxReplicas: {{ .maxReplicas }} + metrics: + {{- with .targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml new file mode 100644 index 0000000..b69de62 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.querier.replicas) 1) (.Values.querier.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.querier.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml new file mode 100644 index 0000000..c84d1a4 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.querier.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + {{- if .Values.querier.serviceMonitor.additionalLabels }} +{{ toYaml .Values.querier.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.querier.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.querierSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.querier.serviceMonitor.interval }} + interval: {{ .Values.querier.serviceMonitor.interval }} + {{- end }} + {{- if .Values.querier.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.querier.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.querier.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.querier.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.querier.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.querier.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.querier.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml new file mode 100644 index 0000000..0701b7d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/querier/querier-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.querierLabels" . | nindent 4 }} + {{- with .Values.querier.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.querier.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.querierSelectorLabels" . 
| nindent 4 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl new file mode 100644 index 0000000..c1f74c9 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/_helpers-query-frontend.tpl @@ -0,0 +1,23 @@ + +{{/* +query-frontend fullname +*/}} +{{- define "cortex.queryFrontendFullname" -}} +{{ include "cortex.fullname" . }}-query-frontend +{{- end }} + +{{/* +query-frontend common labels +*/}} +{{- define "cortex.queryFrontendLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} + +{{/* +query-frontend selector labels +*/}} +{{- define "cortex.queryFrontendSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 0000000..3e31d18 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,107 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.query_frontend.annotations | nindent 4 }} +spec: + replicas: {{ .Values.query_frontend.replicas }} + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.query_frontend.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.queryFrontendLabels" . 
| nindent 8 }} + {{- with .Values.query_frontend.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.query_frontend.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.query_frontend.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.query_frontend.priorityClassName }} + priorityClassName: {{ .Values.query_frontend.priorityClassName }} + {{- end }} + {{- if .Values.query_frontend.securityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.query_frontend.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: query-frontend + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=query-frontend" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.frontend-memcached" . 
| nindent 12 }} + {{- range $key, $value := .Values.query_frontend.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.query_frontend.extraVolumeMounts }} + {{- toYaml .Values.query_frontend.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.query_frontend.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.query_frontend.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.query_frontend.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.query_frontend.resources | nindent 12 }} + {{- if .Values.query_frontend.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.query_frontend.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.env }} + env: + {{- toYaml .Values.query_frontend.env | nindent 12 }} + {{- end }} + {{- with .Values.query_frontend.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.query_frontend.extraContainers }} + {{- toYaml .Values.query_frontend.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.query_frontend.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.query_frontend.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.query_frontend.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.query_frontend.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . 
}}-runtime-config + {{- if .Values.query_frontend.extraVolumes }} + {{- toYaml .Values.query_frontend.extraVolumes | nindent 8}} + {{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml new file mode 100644 index 0000000..2d76c6b --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.query_frontend.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- if .Values.query_frontend.serviceMonitor.additionalLabels }} +{{ toYaml .Values.query_frontend.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.query_frontend.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.query_frontend.serviceMonitor.interval }} + interval: {{ .Values.query_frontend.serviceMonitor.interval }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.query_frontend.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.query_frontend.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.query_frontend.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.query_frontend.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml new file mode 100644 index 0000000..939457c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc-headless.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 4 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 0000000..85ff2e8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.query_frontend.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.queryFrontendSelectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml new file mode 100644 index 0000000..5256949 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/query-frontend/query-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.query_frontend.replicas) 1) (.Values.query_frontend.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.queryFrontendLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.queryFrontendSelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.query_frontend.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl new file mode 100644 index 0000000..86270d0 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/_helpers-ruler.tpl @@ -0,0 +1,30 @@ + +{{/* +ruler fullname +*/}} +{{- define "cortex.rulerFullname" -}} +{{ include "cortex.fullname" . }}-ruler +{{- end }} + +{{/* +ruler common labels +*/}} +{{- define "cortex.rulerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +ruler selector labels +*/}} +{{- define "cortex.rulerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +format rules dir +*/}} +{{- define "cortex.rulerRulesDirName" -}} +rules-{{ . | replace "_" "-" | trimSuffix "-" }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml new file mode 100644 index 0000000..8448108 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.ruler.enabled }} +{{- range $dir, $files := .Values.ruler.directories }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" $ | nindent 4 }} +data: + {{- toYaml $files | nindent 2}} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml new file mode 100644 
index 0000000..a8e034d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-dep.yaml @@ -0,0 +1,191 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.ruler.annotations | nindent 4 }} +spec: + replicas: {{ .Values.ruler.replicas }} + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.ruler.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.rulerLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.ruler.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.ruler.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.ruler.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.ruler.priorityClassName }} + priorityClassName: {{ .Values.ruler.priorityClassName }} + {{- end }} + {{- if .Values.ruler.securityContext.enabled }} + securityContext: {{- omit .Values.ruler.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.ruler.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + {{- if .Values.ruler.sidecar.enabled }} + - name: {{ template "cortex.name" . 
}}-sc-rules + {{- if .Values.ruler.sidecar.image.sha }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}@sha256:{{ .Values.ruler.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.ruler.sidecar.image.repository }}:{{ .Values.ruler.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.ruler.sidecar.imagePullPolicy }} + env: + {{- if .Values.ruler.sidecar.watchMethod }} + - name: METHOD + value: {{ .Values.ruler.sidecar.watchMethod }} + {{ end }} + - name: LABEL + value: "{{ .Values.ruler.sidecar.label }}" + {{- if .Values.ruler.sidecar.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.ruler.sidecar.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.ruler.sidecar.folder }}{{- with .Values.ruler.sidecar.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.ruler.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.ruler.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.ruler.sidecar.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.ruler.sidecar.searchNamespace }}" + {{- end }} + {{- if .Values.ruler.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.ruler.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.ruler.sidecar.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.ruler.sidecar.folderAnnotation }}" + {{- end }} + resources: + {{- toYaml .Values.ruler.sidecar.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.sidecar.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{- end }} + - name: rules + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - 
"-target=ruler" + - "-config.file=/etc/cortex/cortex.yaml" + {{- if .Values.configs.enabled }} + - "-ruler.configs.url=http://{{ template "cortex.configsFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}" + {{- end }} + {{- if not .Values.config.ruler.alertmanager_url }} + {{- if .Values.config.ruler.enable_alertmanager_discovery }} + - "-ruler.alertmanager-url=http://_http-metrics._tcp.{{ template "cortex.name" . }}-alertmanager-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}/api/prom/alertmanager/" + {{- else }} + - "-ruler.alertmanager-url=http://{{ template "cortex.alertmanagerFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.config.server.http_listen_port }}/api/prom/alertmanager/" + {{- end }} + {{- end }} + {{- include "cortex.memcached" . | nindent 12}} + {{- range $key, $value := .Values.ruler.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.ruler.extraVolumeMounts }} + {{- toYaml .Values.ruler.extraVolumeMounts | nindent 12}} + {{- end }} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + mountPath: {{ .Values.ruler.sidecar.folder | quote }} + {{ end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: /data + subPath: {{ .Values.ruler.persistentVolume.subPath }} + - name: tmp + mountPath: /rules + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + mountPath: /etc/cortex/rules/{{ $dir }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.ruler.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.ruler.livenessProbe | nindent 12 
}} + readinessProbe: + {{- toYaml .Values.ruler.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.ruler.resources | nindent 12 }} + {{- if .Values.ruler.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ruler.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.ruler.env }} + env: + {{- toYaml .Values.ruler.env | nindent 12 }} + {{- end }} + {{- if .Values.ruler.extraContainers }} + {{- toYaml .Values.ruler.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.ruler.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.ruler.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.ruler.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ruler.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: tmp + emptyDir: {} + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "cortex.rulerRulesDirName" $dir }} + configMap: + name: {{ include "cortex.rulerFullname" $ }}-{{ include "cortex.rulerRulesDirName" $dir }} + {{- end }} + - name: storage + emptyDir: {} + {{- if .Values.ruler.sidecar.enabled }} + - name: sc-rules-volume + emptyDir: {} + {{- end }} + {{- if .Values.ruler.extraVolumes }} + {{- toYaml .Values.ruler.extraVolumes | nindent 8}} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml new file mode 100644 index 0000000..52fb3e0 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.ruler.replicas) 1) (.Values.ruler.podDisruptionBudget) }} +apiVersion: {{ include 
"cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.ruler.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml new file mode 100644 index 0000000..de6744f --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ruler.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- if .Values.ruler.serviceMonitor.additionalLabels }} +{{ toYaml .Values.ruler.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.ruler.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.rulerSelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.ruler.serviceMonitor.interval }} + interval: {{ .Values.ruler.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ruler.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ruler.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.ruler.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ruler.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.ruler.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml new file mode 100644 index 0000000..7752ef4 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/ruler/ruler-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.ruler.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.rulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.rulerLabels" . | nindent 4 }} + {{- with .Values.ruler.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.ruler.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.rulerSelectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml new file mode 100644 index 0000000..2b30599 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/runtime-configmap.yaml @@ -0,0 +1,18 @@ +{{- with .Values.runtimeconfigmap }} +{{- if .create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cortex.fullname" $ }}-runtime-config + namespace: {{ $.Release.Namespace }} + labels: + {{- include "cortex.labels" $ | nindent 4 }} + {{- with .annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +data: + runtime_config.yaml: | + {{- tpl (toYaml .runtime_config) $ | nindent 4 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml new file mode 100644 index 0000000..9194971 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret-postgresql.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.configsdb_postgresql.enabled .Values.configsdb_postgresql.auth.password -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }}-postgresql + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . 
| nindent 4 }} +data: + postgresql-password: {{ .Values.configsdb_postgresql.auth.password | b64enc}} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret.yaml new file mode 100644 index 0000000..ff0e78f --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if (and (not .Values.useExternalConfig) (not .Values.useConfigMap)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cortex.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +data: + cortex.yaml: {{ tpl (toYaml .Values.config) . | b64enc }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml new file mode 100644 index 0000000..963f866 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "cortex.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . 
| nindent 4 }} + annotations: + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl new file mode 100644 index 0000000..3cca867 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/_helpers-store-gateway.tpl @@ -0,0 +1,23 @@ + +{{/* +store-gateway fullname +*/}} +{{- define "cortex.storeGatewayFullname" -}} +{{ include "cortex.fullname" . }}-store-gateway +{{- end }} + +{{/* +store-gateway common labels +*/}} +{{- define "cortex.storeGatewayLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: store-gateway +{{- end }} + +{{/* +store-gateway selector labels +*/}} +{{- define "cortex.storeGatewaySelectorLabels" -}} +{{ include "cortex.selectorLabels" . }} +app.kubernetes.io/component: store-gateway +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml new file mode 100644 index 0000000..1019cc8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.store_gateway.replicas) 1) (.Values.store_gateway.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . 
| nindent 6 }} + {{- toYaml .Values.store_gateway.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml new file mode 100644 index 0000000..39eaeda --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.store_gateway.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- if .Values.store_gateway.serviceMonitor.additionalLabels }} +{{ toYaml .Values.store_gateway.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.store_gateway.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . 
| nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.store_gateway.serviceMonitor.interval }} + interval: {{ .Values.store_gateway.serviceMonitor.interval }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.store_gateway.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.store_gateway.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.store_gateway.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.store_gateway.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 0000000..0238c75 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,142 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- toYaml .Values.store_gateway.annotations | nindent 4 }} +spec: + replicas: {{ .Values.store_gateway.replicas }} + selector: + matchLabels: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.store_gateway.strategy | nindent 4 }} + serviceName: {{ template "cortex.fullname" . 
}}-store-gateway-headless + {{- if .Values.store_gateway.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + {{- if .Values.store_gateway.persistentVolume.annotations }} + annotations: + {{ toYaml .Values.store_gateway.persistentVolume.annotations | nindent 10 }} + {{- end }} + spec: + {{- if .Values.store_gateway.persistentVolume.storageClass }} + {{- if (eq "-" .Values.store_gateway.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.store_gateway.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + accessModes: + {{- toYaml .Values.store_gateway.persistentVolume.accessModes | nindent 10 }} + resources: + requests: + storage: "{{ .Values.store_gateway.persistentVolume.size }}" + {{- end }} + template: + metadata: + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.store_gateway.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.store_gateway.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.store_gateway.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.store_gateway.priorityClassName }} + priorityClassName: {{ .Values.store_gateway.priorityClassName }} + {{- end }} + {{- if .Values.store_gateway.securityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.store_gateway.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + nodeSelector: + {{- toYaml .Values.store_gateway.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.store_gateway.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.store_gateway.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.store_gateway.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . | nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + {{- if not .Values.store_gateway.persistentVolume.enabled }} + - name: storage + emptyDir: {} + {{- end }} + {{- if .Values.store_gateway.extraVolumes }} + {{- toYaml .Values.store_gateway.extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .Values.store_gateway.extraContainers }} + {{ toYaml .Values.store_gateway.extraContainers | nindent 8 }} + {{- end }} + - name: store-gateway + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=store-gateway" + - "-config.file=/etc/cortex/cortex.yaml" + {{- include "cortex.memcached" . 
| nindent 12}} + {{- range $key, $value := .Values.store_gateway.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.store_gateway.extraVolumeMounts }} + {{- toYaml .Values.store_gateway.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + {{- if .Values.store_gateway.persistentVolume.subPath }} + subPath: {{ .Values.store_gateway.persistentVolume.subPath }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + - name: gossip + containerPort: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.store_gateway.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.store_gateway.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.store_gateway.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.store_gateway.resources | nindent 12 }} + {{- if .Values.store_gateway.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.store_gateway.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.store_gateway.env }} + env: + {{- toYaml .Values.store_gateway.env | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 0000000..c56ec77 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,24 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: 
+ name: {{ include "cortex.storeGatewayFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.server.grpc_listen_port }} + protocol: TCP + name: grpc + targetPort: grpc + selector: + {{- include "cortex.storeGatewaySelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 0000000..f58019b --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,23 @@ +{{- if eq .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.storeGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.storeGatewayLabels" . | nindent 4 }} + {{- with .Values.store_gateway.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.storeGatewaySelectorLabels" . 
| nindent 4 }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml new file mode 100644 index 0000000..fc41461 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/svc-memberlist-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.fullname" . }}-memberlist + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.labels" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.config.memberlist.bind_port }} + protocol: TCP + name: gossip + targetPort: gossip + selector: + {{- include "cortex.selectorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl new file mode 100644 index 0000000..4798c6d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/_helpers-table-manager.tpl @@ -0,0 +1,23 @@ + +{{/* +table-manager fullname +*/}} +{{- define "cortex.tableManagerFullname" -}} +{{ include "cortex.fullname" . }}-table-manager +{{- end }} + +{{/* +table-manager common labels +*/}} +{{- define "cortex.tableManagerLabels" -}} +{{ include "cortex.labels" . }} +app.kubernetes.io/component: table-manager +{{- end }} + +{{/* +table-manager selector labels +*/}} +{{- define "cortex.tableManagerSelectorLabels" -}} +{{ include "cortex.selectorLabels" . 
}} +app.kubernetes.io/component: table-manager +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml new file mode 100644 index 0000000..d24dcc3 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-dep.yaml @@ -0,0 +1,106 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} + annotations: + {{- toYaml .Values.table_manager.annotations | nindent 4 }} +spec: + replicas: {{ .Values.table_manager.replicas }} + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.table_manager.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "cortex.tableManagerLabels" . | nindent 8 }} + {{- with .Values.table_manager.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include "cortex.configChecksum" . }} + {{- with .Values.table_manager.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.table_manager.serviceAccount.name | default (include "cortex.serviceAccountName" . ) }} + {{- if .Values.table_manager.priorityClassName }} + priorityClassName: {{ .Values.table_manager.priorityClassName }} + {{- end }} + {{- if .Values.table_manager.securityContext.enabled }} + securityContext: {{- omit .Values.table_manager.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- toYaml .Values.table_manager.initContainers | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + containers: + - name: table-manager + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-target=table-manager" + - "-config.file=/etc/cortex/cortex.yaml" + {{- range $key, $value := .Values.table_manager.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.table_manager.extraVolumeMounts }} + {{- toYaml .Values.table_manager.extraVolumeMounts | nindent 12}} + {{- end }} + - name: config + mountPath: /etc/cortex + - name: runtime-config + mountPath: /etc/cortex-runtime-config + - name: storage + mountPath: "/data" + subPath: {{ .Values.table_manager.persistentVolume.subPath }} + ports: + - name: http-metrics + containerPort: {{ .Values.config.server.http_listen_port }} + protocol: TCP + startupProbe: + {{- toYaml .Values.table_manager.startupProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.table_manager.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.table_manager.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.table_manager.resources | nindent 12 }} + {{- if .Values.table_manager.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.table_manager.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.table_manager.env }} + env: + {{- toYaml .Values.table_manager.env | nindent 12 }} + {{- end }} + {{- if .Values.table_manager.extraContainers }} + {{- toYaml .Values.table_manager.extraContainers | nindent 8}} + {{- end }} + nodeSelector: + {{- toYaml .Values.table_manager.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.table_manager.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.table_manager.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.table_manager.terminationGracePeriodSeconds }} + volumes: + {{- include "cortex.configVolume" . 
| nindent 8 }} + - name: runtime-config + configMap: + name: {{ template "cortex.fullname" . }}-runtime-config + - name: storage + emptyDir: {} + {{- if .Values.table_manager.extraVolumes }} + {{- toYaml .Values.table_manager.extraVolumes | nindent 8}} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml new file mode 100644 index 0000000..91adabf --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-poddisruptionbudget.yaml @@ -0,0 +1,14 @@ +{{- if and (gt (int .Values.table_manager.replicas) 1) (.Values.table_manager.podDisruptionBudget) }} +apiVersion: {{ include "cortex.pdbVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . | nindent 6 }} + {{- toYaml .Values.table_manager.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml new file mode 100644 index 0000000..9748724 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.table_manager.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . 
| nindent 4 }} + {{- if .Values.table_manager.serviceMonitor.additionalLabels }} +{{ toYaml .Values.table_manager.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.annotations }} + annotations: +{{ toYaml .Values.table_manager.serviceMonitor.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "cortex.tableManagerSelectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + endpoints: + - port: http-metrics + {{- if .Values.table_manager.serviceMonitor.interval }} + interval: {{ .Values.table_manager.serviceMonitor.interval }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.table_manager.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.table_manager.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + {{- if .Values.table_manager.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.table_manager.serviceMonitor.metricRelabelings | nindent 4 }} + {{- end }} + {{- with .Values.table_manager.serviceMonitor.extraEndpointSpec }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml new file mode 100644 index 0000000..ff3c57d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/templates/table-manager/table-manager-svc.yaml @@ -0,0 +1,23 @@ +{{- if ne .Values.config.storage.engine "blocks" -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cortex.tableManagerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cortex.tableManagerLabels" . 
| nindent 4 }} + {{- with .Values.table_manager.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.table_manager.service.annotations | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.config.server.http_listen_port }} + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + {{- include "cortex.tableManagerSelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/cortex/values.yaml b/roles/cmoa_install/files/02-base/base/charts/cortex/values.yaml new file mode 100644 index 0000000..4a0f8c8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/cortex/values.yaml @@ -0,0 +1,1605 @@ +image: + #repository: quay.io/cortexproject/cortex + repository: 10.10.31.243:5000/cmoa3/cortex + # -- Allows you to override the cortex version in this chart. Use at your own risk. + #tag: "" + tag: v1.11.0 + pullPolicy: IfNotPresent + + # -- Optionally specify an array of imagePullSecrets. + # Secrets must be manually created in the namespace. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # pullSecrets: [] + pullSecrets: + - regcred + + +# -- Kubernetes cluster DNS domain +clusterDomain: cluster.local + +tags: + # -- Set to true to enable block storage memcached caching + blocks-storage-memcached: false + +ingress: + enabled: false + ingressClass: + enabled: false + name: "nginx" + annotations: {} + hosts: + - host: chart-example.local + paths: + - / + tls: [] + +serviceAccount: + create: true + name: + annotations: {} + automountServiceAccountToken: true + +useConfigMap: false +useExternalConfig: false +externalConfigSecretName: 'secret-with-config.yaml' +externalConfigVersion: '0' + +config: + auth_enabled: false + api: + prometheus_http_prefix: '/prometheus' + # -- Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs + # which can benefit from compression. 
+ response_compression_enabled: true + ingester: + walconfig: + wal_enabled: true + flush_on_shutdown_with_wal_enabled: true + recover_from_wal: true + lifecycler: + # -- We don't want to join immediately, but wait a bit to see other ingesters and their tokens first. + # It can take a while to have the full picture when using gossip + join_after: 10s + + # -- To avoid generating same tokens by multiple ingesters, they can "observe" the ring for a while, + # after putting their own tokens into it. This is only useful when using gossip, since multiple + # ingesters joining at the same time can have conflicting tokens if they don't see each other yet. + observe_period: 10s + # -- Duration to sleep for before exiting, to ensure metrics are scraped. + final_sleep: 30s + num_tokens: 512 + ring: + # -- Ingester replication factor per default is 3 + replication_factor: 3 + kvstore: + store: "memberlist" + limits: + # -- Enforce that every sample has a metric name + enforce_metric_name: true + reject_old_samples: true + reject_old_samples_max_age: 168h + max_query_lookback: 0s + server: + http_listen_port: 8080 + grpc_listen_port: 9095 + grpc_server_max_recv_msg_size: 10485760 + grpc_server_max_send_msg_size: 10485760 + grpc_server_max_concurrent_streams: 10000 + ingester_client: + grpc_client_config: + max_recv_msg_size: 10485760 + max_send_msg_size: 10485760 + # -- See https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config + storage: + engine: blocks + index_queries_cache_config: + memcached: + # -- How long keys stay in the memcache + expiration: 1h + memcached_client: + # -- Maximum time to wait before giving up on memcached requests. + timeout: 1s + blocks_storage: + # custom backend setting related to using S3 + backend: s3 + s3: + bucket_name: cortex-bucket + # -- The S3 bucket endpoint.
It could be an AWS S3 endpoint listed at + # https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an + # S3-compatible service in hostname:port format. + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + + tsdb: + dir: /data/tsdb + bucket_store: + sync_dir: /data/tsdb-sync + bucket_index: + enabled: true + # -- https://cortexmetrics.io/docs/configuration/configuration-file/#store_gateway_config + store_gateway: + sharding_enabled: false + distributor: + # -- Distribute samples based on all labels, as opposed to solely by user and + # metric name. + shard_by_all_labels: true + pool: + health_check_ingesters: true + memberlist: + bind_port: 7946 + # -- the service name of the memberlist + # if using memberlist discovery + join_members: + - '{{ include "cortex.fullname" $ }}-memberlist' + querier: + active_query_tracker_dir: /data/active-query-tracker + # -- Maximum lookback beyond which queries are not sent to ingester. 0 means all + # queries are sent to ingester. Ingesters by default have no data older than 12 hours, + # so we can safely set this to 13 hours + query_ingesters_within: 9h + # -- The time after which a metric should be queried from storage and not just + # ingesters. + query_store_after: 7h + # -- Comma separated list of store-gateway addresses in DNS Service Discovery + # format. This option is set automatically when using the blocks storage and the + # store-gateway sharding is disabled (when enabled, the store-gateway instances + # form a ring and addresses are picked from the ring).
+ # @default -- automatic + store_gateway_addresses: |- + {{ if and (eq .Values.config.storage.engine "blocks") (not .Values.config.store_gateway.sharding_enabled) -}} + dns+{{ include "cortex.storeGatewayFullname" $ }}-headless:9095 + {{- end }} + query_range: + split_queries_by_interval: 24h + align_queries_with_step: true + cache_results: true + results_cache: + cache: + memcached: + expiration: 1h + memcached_client: + timeout: 1s + ruler: + enable_alertmanager_discovery: false + # -- Enable the experimental ruler config api. + alertmanager_url: 'http://alertmanager.imxc/alertmanager' + enable_api: true + # -- Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local) refer to https://cortexmetrics.io/docs/configuration/configuration-file/#ruler_config + storage: {} + runtime_config: + file: /etc/cortex-runtime-config/runtime_config.yaml + alertmanager: + # -- Enable the experimental alertmanager config api. + enable_api: true + external_url: 'http://alertmanager.imxc/alertmanager' + #external_url: '/api/prom/alertmanager' + # -- Type of backend to use to store alertmanager configs. Supported values are: "configdb", "gcs", "s3", "local". refer to: https://cortexmetrics.io/docs/configuration/configuration-file/#alertmanager_config + storage: {} + frontend: + log_queries_longer_than: 10s + # S3 사용 관련 커스텀 설정 + alertmanager_storage: + s3: + bucket_name: cortex-alertmanager + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + ruler_storage: + s3: + bucket_name: cortex-ruler + endpoint: minio.imxc.svc.cluster.local:9000 + secret_access_key: admin1234 + access_key_id: cloudmoa + insecure: true + +runtimeconfigmap: + # -- If true, a configmap for the `runtime_config` will be created. + # If false, the configmap _must_ exist already on the cluster or pods will fail to create. 
+ create: true + annotations: {} + # -- https://cortexmetrics.io/docs/configuration/arguments/#runtime-configuration-file + # 설정부 + runtime_config: {} +alertmanager: + enabled: true + replicas: 1 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful for using a persistent volume for storing silences between restarts. + enabled: false + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log level (debug, info, warn, error) + extraArgs: {} + # -experimental.alertmanager.enable-api: "true" + # -alertmanager.web.external-url: /alertmanager + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + + persistentVolume: + # -- If true and alertmanager.statefulSet.enabled is true, + # Alertmanager will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Alertmanager data Persistent Volume Claim annotations + annotations: {} + + # -- Alertmanager data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Alertmanager data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Alertmanager data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Alertmanager data Persistent Volume Storage Class 
+ # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: null + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + # -- Tolerations for pod assignment + # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # -- If not set then a PodDisruptionBudget will not be created + podDisruptionBudget: + maxUnavailable: 1 + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 60 + + # -- Init containers to be added to the cortex pod. + initContainers: [] + + # -- Additional containers to be added to the cortex pod. + extraContainers: [] + + # -- Additional volumes to the cortex pod. + extraVolumes: [] + + # -- Extra volume mounts that will be added to the cortex container + extraVolumeMounts: [] + + # -- Additional ports to the cortex services. Useful to expose extra container ports. 
+ extraPorts: [] + + # -- Extra env variables to pass to the cortex container + env: [] + + # -- Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # -- skipTlsVerify Set to true to skip tls verification for kube api calls + skipTlsVerify: false + enableUniqueFilenames: false + enabled: false + label: cortex_alertmanager + watchMethod: null + labelValue: null + folder: /data + defaultFolderName: null + searchNamespace: null + folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +distributor: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: + -validation.max-label-names-per-series: "45" + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - distributor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the distributor pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 60 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +ingester: + replicas: 3 + + statefulSet: + # -- If true, use a statefulset instead of a deployment for pod management. + # This is useful when using WAL + enabled: true + # -- ref: https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down and https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies for scaledown details + podManagementPolicy: OrderedReady + + service: + annotations: {} + labels: {} + + serviceAccount: + name: + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - ingester + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 30 + targetMemoryUtilizationPercentage: 80 + behavior: + scaleDown: + # -- see https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down for scaledown details + policies: + - type: Pods + value: 1 + # set to no less than 2x the maximum between -blocks-storage.bucket-store.sync-interval and -compactor.cleanup-interval + periodSeconds: 1800 + # -- uses metrics from the past 1h to make scaleDown decisions + stabilizationWindowSeconds: 3600 + scaleUp: + # -- This default scaleup policy allows adding 1 pod every 30 minutes. + # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + policies: + - type: Pods + value: 1 + periodSeconds: 1800 + + lifecycle: + # -- The /shutdown preStop hook is recommended as part of the ingester + # scaledown process, but can be removed to optimize rolling restarts in + # instances that will never be scaled down or when using chunks storage + # with WAL disabled. 
+ # https://cortexmetrics.io/docs/guides/ingesters-scaling-up-and-down/#scaling-down + preStop: + httpGet: + path: "/ingester/shutdown" + port: http-metrics + + persistentVolume: + # -- If true and ingester.statefulSet.enabled is true, + # Ingester will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: true + + # -- Ingester data Persistent Volume Claim annotations + annotations: {} + + # -- Ingester data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Ingester data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Ingester data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Ingester data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: exem-local-storage + + # -- Startup/liveness probes for ingesters are not recommended. + # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + startupProbe: {} + + # -- Startup/liveness probes for ingesters are not recommended. 
+ # Ref: https://cortexmetrics.io/docs/guides/running-cortex-on-kubernetes/#take-extra-care-with-ingesters + livenessProbe: {} + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + statefulStrategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +ruler: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + #-ruler.configs.url: http://cortex-configs:8080 + #-ruler.alertmanager-url: http://cortex-alertmanager:8080 + -ruler.storage.type: configdb + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + # -- allow configuring rules via configmap. 
ref: https://cortexproject.github.io/cortex-helm-chart/guides/configure_rules_via_configmap.html + directories: {} + + # -- Sidecars that collect the configmaps with specified label and stores the included files into the respective folders + sidecar: + image: + repository: 10.10.31.243:5000/cmoa3/k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 50m + # memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + enabled: false + # -- label that the configmaps with rules are marked with + label: cortex_rules + watchMethod: null + # -- value of label that the configmaps with rules are set to + labelValue: null + # -- folder in the pod that should hold the collected rules (unless `defaultFolderName` is set) + folder: /tmp/rules + # -- The default folder name, it will create a subfolder under the `folder` and put rules in there instead + defaultFolderName: null + # -- If specified, the sidecar will search for rules config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # -- If specified, the sidecar will look for an annotation with this name to create a folder and put the rules there. + # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
+ folderAnnotation: null + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + +querier: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - querier + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the querier pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +query_frontend: + replicas: 2 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - query-frontend + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + lifecycle: {} + +table_manager: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +configs: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: + # -configs.database.migrations-dir: /migrations + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /ready + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /ready + port: http-metrics + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 180 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +nginx: + enabled: true + replicas: 2 + http_listen_port: 80 + config: + dnsResolver: coredns.kube-system.svc.cluster.local + # -- ref: http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size + client_max_body_size: 20M + # -- arbitrary snippet to inject in the http { } section of the nginx config + httpSnippet: "" + # -- arbitrary snippet to inject in the top section of the nginx config + mainSnippet: "" + # -- arbitrary snippet to inject in the server { } section of the nginx config + serverSnippet: "" + setHeaders: {} + # -- (optional) List of [auth tenants](https://cortexmetrics.io/docs/guides/auth/) to set in the nginx config + auth_orgs: [] + # -- (optional) Name of basic auth secret. + # In order to use this option, a secret with htpasswd formatted contents at + # the key ".htpasswd" must exist. 
For example: + # + # apiVersion: v1 + # kind: Secret + # metadata: + # name: my-secret + # namespace: + # stringData: + # .htpasswd: | + # user1:$apr1$/woC1jnP$KAh0SsVn5qeSMjTtn0E9Q0 + # user2:$apr1$QdR8fNLT$vbCEEzDj7LyqCMyNpSoBh/ + # + # Please note that the use of basic auth will not identify organizations + # the way X-Scope-OrgID does. Thus, the use of basic auth alone will not + # prevent one tenant from viewing the metrics of another. To ensure tenants + # are scoped appropriately, explicitly set the `X-Scope-OrgID` header + # in the nginx config. Example + # setHeaders: + # X-Scope-OrgID: $remote_user + basicAuthSecretName: "" + + image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: 1.21 + pullPolicy: IfNotPresent + + service: + type: ClusterIP + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: {} + + nodeSelector: {} + affinity: {} + annotations: {} + persistentVolume: + subPath: + + startupProbe: + httpGet: + path: /healthz + port: http-metrics + failureThreshold: 10 + livenessProbe: + httpGet: + path: /healthz + port: http-metrics + readinessProbe: + httpGet: + path: /healthz + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: false + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + + terminationGracePeriodSeconds: 10 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + + autoscaling: + # -- Creates a HorizontalPodAutoscaler for the nginx pods. 
+ enabled: false + minReplicas: 2 + maxReplicas: 30 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 0 # 80 + # -- Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior + behavior: {} + +store_gateway: + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - store-gateway + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true Store-gateway will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- Store-gateway data Persistent Volume Claim annotations + annotations: {} + + # -- Store-gateway data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # -- Store-gateway data Persistent Volume size + size: 2Gi + + # -- Subdirectory of Store-gateway data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- Store-gateway data 
Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. + storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +compactor: + enabled: true + replicas: 1 + + service: + annotations: {} + labels: {} + + serviceAccount: + # -- "" disables the individual serviceAccount and uses the global serviceAccount for that component + name: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + relabelings: [] + metricRelabelings: [] + # -- Additional endpoint configuration https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + extraEndpointSpec: {} + + resources: {} + + # -- Additional Cortex container arguments, e.g. 
log.level (debug, info, warn, error) + extraArgs: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod Annotations + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + + nodeSelector: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - compactor + topologyKey: 'kubernetes.io/hostname' + + annotations: {} + + persistentVolume: + # -- If true compactor will create/use a Persistent Volume Claim + # If false, use emptyDir + enabled: false + + # -- compactor data Persistent Volume Claim annotations + annotations: {} + + # -- compactor data Persistent Volume access modes + # Must match those of existing PV or dynamic provisioner + # Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + accessModes: + - ReadWriteOnce + + # compactor data Persistent Volume size + size: 2Gi + + # -- Subdirectory of compactor data Persistent Volume to mount + # Useful if the volume's root directory is not empty + subPath: '' + + # -- compactor data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. 
+ storageClass: null + + startupProbe: + failureThreshold: 60 + initialDelaySeconds: 120 + periodSeconds: 30 + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + + securityContext: {} + containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + + strategy: + type: RollingUpdate + + terminationGracePeriodSeconds: 240 + + tolerations: [] + + podDisruptionBudget: + maxUnavailable: 1 + + initContainers: [] + extraContainers: [] + extraVolumes: [] + extraVolumeMounts: [] + extraPorts: [] + env: [] + +# -- chunk caching for legacy chunk storage engine +memcached: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index read caching for legacy chunk storage engine +memcached-index-read: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +# -- index write caching for legacy chunk storage engine +memcached-index-write: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-frontend: + enabled: false + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-index: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks: + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. + - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +memcached-blocks-metadata: + # enabled/disabled via the tags.blocks-storage-memcached boolean + architecture: "high-availability" + replicaCount: 2 + resources: {} + extraEnv: + # -- MEMCACHED_CACHE_SIZE is the amount of memory allocated to memcached for object storage + - name: MEMCACHED_CACHE_SIZE + value: "1024" + # -- MEMCACHED_MAX_CONNECTIONS is the maximum number of simultaneous connections to the memcached service + - name: MEMCACHED_MAX_CONNECTIONS + value: "1024" + # -- MEMCACHED_THREADS is the number of threads to use when processing incoming requests. + # By default, memcached is configured to use 4 concurrent threads. The threading improves the performance of + # storing and retrieving data in the cache, using a locking system to prevent different threads overwriting or updating the same values. 
+ - name: MEMCACHED_THREADS + value: "4" + metrics: + enabled: true + serviceMonitor: + enabled: false + +configsdb_postgresql: + enabled: true + uri: postgres://admin@postgres/configs?sslmode=disable + auth: + password: eorbahrhkswp + existing_secret: + name: + key: diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/.helmignore b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/.helmignore new file mode 100644 index 0000000..e12c0b4 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/Chart.yaml new file mode 100644 index 0000000..be38643 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: 7.6.0 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +icon: https://helm.elastic.co/icons/elasticsearch.png +maintainers: +- email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +sources: +- https://github.com/elastic/elasticsearch +version: 7.6.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml new file mode 100644 index 0000000..2631417 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/1.headless_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch-headless + labels: + app: elasticsearch +spec: + clusterIP: None + selector: + app: elasticsearch + ports: + - name: transport + port: 9300 diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml 
b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml new file mode 100644 index 0000000..505cc5a --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/2.service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: imxc + name: elasticsearch + labels: + app: elasticsearch +spec: + selector: + app: elasticsearch + ports: + - name: http + port: 9200 + targetPort: 9200 +# nodePort: 30200 +# type: NodePort + type: ClusterIP diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml new file mode 100644 index 0000000..ee0a42d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/3.configmap.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: imxc + name: elasticsearch-config + labels: + app: elasticsearch +data: +# discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch", "elasticsearch-2.elasticsearch"] +# cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1", "elasticsearch-2"] +# ES_JAVA_OPTS: -Xms8g -Xmx8g + elasticsearch.yml: | + cluster.name: imxc-elasticsearch-cluster + network.host: ${POD_NAME} + discovery.seed_hosts: ["elasticsearch-0.elasticsearch", "elasticsearch-1.elasticsearch"] + cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1"] + xpack.ml.enabled: false + xpack.security.enabled: true + xpack.security.transport.ssl.enabled: true + xpack.security.transport.ssl.verification_mode: certificate + xpack.security.transport.ssl.client_authentication: required + xpack.security.transport.ssl.keystore.path: elastic-certificates.p12 + xpack.security.transport.ssl.truststore.path: elastic-certificates.p12 + xpack.security.transport.filter.enabled: true + xpack.security.transport.filter.allow: _all + xpack.security.http.ssl.enabled: 
true + xpack.security.http.ssl.keystore.path: http.p12 + node.ml: false + cluster.routing.rebalance.enable: "all" + cluster.routing.allocation.allow_rebalance: "indices_all_active" + cluster.routing.allocation.cluster_concurrent_rebalance: 2 + cluster.routing.allocation.balance.shard: 0.3 + cluster.routing.allocation.balance.index: 0.7 + cluster.routing.allocation.balance.threshold: 1 + cluster.routing.allocation.disk.threshold_enabled: true + cluster.routing.allocation.disk.watermark.low: "85%" + cluster.routing.allocation.disk.watermark.high: "90%" + cluster.routing.allocation.disk.watermark.flood_stage: "95%" + thread_pool.write.queue_size: 1000 + thread_pool.write.size: 2 + ES_JAVA_OPTS: -Xms8g -Xmx8g diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml new file mode 100644 index 0000000..5a53f57 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/4.pv.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-0 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-elasticsearch-cluster-1 + labels: + type: local + app: elasticsearch +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.ELASTICSEARCH_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: elasticsearch-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - 
key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-elasticsearch-cluster-2 +# labels: +# type: local +# app: elasticsearch +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.ELASTICSEARCH_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: elasticsearch-storage +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: +# - {{ .Values.global.ELASTICSEARCH_HOST3 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml new file mode 100644 index 0000000..a4ae2db --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/5.pvc.yaml @@ -0,0 +1,53 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: elasticsearch-data-elasticsearch-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: elasticsearch-storage + selector: + matchLabels: + type: local + app: elasticsearch +--- +#kind: PersistentVolumeClaim +#apiVersion: v1 +#metadata: +# namespace: imxc +# name: elasticsearch-data-elasticsearch-2 +#spec: +# accessModes: +# - ReadWriteOnce +# volumeMode: Filesystem +# resources: +# requests: +# storage: 30Gi +# storageClassName: elasticsearch-storage +# selector: +# matchLabels: +# type: local +# app: elasticsearch \ No 
newline at end of file diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml new file mode 100644 index 0000000..2cbd4b8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/6.statefulset.yaml @@ -0,0 +1,146 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: apps/v1beta1 +{{- end }} +kind: StatefulSet +metadata: + namespace: imxc + name: elasticsearch +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: elasticsearch +{{- end }} + serviceName: elasticsearch + replicas: 2 #3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: elasticsearch + spec: + securityContext: + fsGroup: 1000 + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - elasticsearch + topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: elastic-node + operator: In + values: + - "true" + initContainers: + - name: init-sysctl + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + #command: ["sysctl", "-w", "vm.max_map_count=262144"] + command: ["/bin/sh", "-c"] + args: ["sysctl -w vm.max_map_count=262144; chown -R 1000:1000 /usr/share/elasticsearch/data"] + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + containers: + - name: elasticsearch + resources: + requests: + cpu: 1000m + memory: 16000Mi #32000Mi + limits: + cpu: 2000m + memory: 16000Mi #32000Mi + securityContext: + privileged: true + runAsUser: 1000 + capabilities: + add: + - 
IPC_LOCK + - SYS_RESOURCE + image: {{ .Values.global.IMXC_IN_REGISTRY }}/elasticsearch:{{ .Values.global.ELASTICSEARCH_VERSION }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ES_JAVA_OPTS + valueFrom: + configMapKeyRef: + name: elasticsearch-config + key: ES_JAVA_OPTS + # log4j patch + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: ELASTIC_USERNAME + value: {{ .Values.global.CMOA_ES_ID }} + - name: ELASTIC_PASSWORD + value: {{ .Values.global.CMOA_ES_PW }} + readinessProbe: + httpGet: + scheme: HTTPS + path: /_cluster/health?local=true + port: 9200 + httpHeaders: + - name: Authorization + # encode base64 by elastic:elastic + value: Basic ZWxhc3RpYzplbGFzdGlj + initialDelaySeconds: 5 + ports: + - containerPort: 9200 + name: es-http + - containerPort: 9300 + name: es-transport + volumeMounts: + - name: elasticsearch-data + mountPath: /usr/share/elasticsearch/data + - name: elasticsearch-config + mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + subPath: elasticsearch.yml + - name: es-cert-certificate + mountPath: /usr/share/elasticsearch/config/elastic-certificates.p12 + subPath: elastic-certificates.p12 + - name: es-cert-ca + mountPath: /usr/share/elasticsearch/config/elastic-stack-ca.p12 + subPath: elastic-stack-ca.p12 + - name: es-cert-http + mountPath: /usr/share/elasticsearch/config/http.p12 + subPath: http.p12 + volumes: + - name: elasticsearch-config + configMap: + name: elasticsearch-config + items: + - key: elasticsearch.yml + path: elasticsearch.yml + - name: es-cert-certificate + secret: + secretName: es-cert + - name: es-cert-ca + secret: + secretName: es-cert + - name: es-cert-http + secret: + secretName: es-cert + volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: elasticsearch-storage + resources: + requests: + storage: 10Gi diff --git 
a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml new file mode 100644 index 0000000..2a24b92 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/7.secrets.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + elastic-certificates.p12: MIINbwIBAzCCDSgGCSqGSIb3DQEHAaCCDRkEgg0VMIINETCCBW0GCSqGSIb3DQEHAaCCBV4EggVaMIIFVjCCBVIGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRrCrEWs79GCUPrYkFrkDBEF9uz4gIDAMNQBIIEyJUjcP339Anee6bdJls469HbsqYGgzidG41xto7ignNdZdP9LTTca+w8sN8tbVnTUZi4kQYcPSQqv+cWobi66KpgvQ7HhA/YE9K5L7wR7KEj0o61LYvucHm19hRdt788EvBy4mi8cDAr3m49NNuLUM6wyeCEKr2W2dwZFIyxFTPVv6/ef6cuHyDNLXJtjUmOIzNDL8Olqk8JGAd9bwXlizcShfmbiHHX8pAhK0u9JThFQePvCGiKA4LwzeuuwuEniznMlUQ4T/TjLjLLYcoS4vktfOJKPOgL3esjsc5hPoVgbw+ZpNCxRq1RVs/5eOBkxzXhJ7hdNELJDcMjitBfl71MlSDtMV4FhlVuhjilsuHx6URucsEE2l1V3asg4QP1PoSiACqncr2WhCcrKu0d8DztlIkCYG7D8oiAx4nEzsm0xmOhIcigHw6GP4MNeCieJCgAwLkJf1m73IYcxyaKsJAc57jfs9ue62KkVHL2NxNRjTps2j0Cl5NJQRE4CTkieU0etsNS1nJEwiJunVTyHXAa53MF6j40awEqs2Ko4gQENPpuQc599yJb+ZTHfHPe8bpfrmnxiEAaeiABu+OVH9bdLK5gtCyD5vXGZKVtHbyR+0+UlBggw/horFQIP+x7SKO53+ho0iCnYyQK52kJiv93JNgStGHpxf1SkPTtWHOraR2qSZTX6F7vjBtIq3Y6ocb6yo/jMNhzk3spHdz+F99S6uV3NLmDfX2vJmu1YSaPwaNZGDggcFI/g2S5ylBWyHpk2rB5gtklUIQEWxFFvbFOp37ffcdC0mZ6SgpOxj+IxuVLqTvyDLjrfteEvfjRAFXsT8E4XikC8QKjQ+KAwDYETidOiYB0/ByCh7t1KbcKJWU8XYxqzukX88CyVtO9Lp/f97x3ycvaF1UfzLBrm/bnTa0jPEP2/OdzpbjQJcEGX64+QY92k38zjPe4tedUz5H/C9aw8Q8r/DSxUhn2sdDXssR9jytITLLOJHDJX7XCfZxtoW60bwRm5MyXc4bJmjZT2BgxTWIVokaOhk0IZwpbC/oxh1QkaHBioP6+slASXg8Xu9l+mACevb1b9RvpN+fhurW2wOHl4Kul775BCohuTtiqKAce8KEACwncwYz+ZfcPTkbLRy6+p6NI3zNWpZE+iFlPtLh+2+T/QQHEfKTNUxcXLt8WCMOZuCe776T41nY8UhbUQJKqlEvom3MzCcsvFBoahlpjv+rg9/Ay7ESMil49e2x3qbD2929X0BHz//RcvPO5fvSEK/tC2uHzWzqHf0ZaRwtO19Z95Uv3GjGNF0SO8qri830LfJ+ctjk320qLyZmxA9QgPoI2oMHSxkaX1fgVeiN9coBM8yJbPK8ZdOOg4abnYOhqrTJXaoSFo+SYyAVZoTiQIIk/JScL5Qcw9IJw6sSKmOdChy2spYQKeo1N
U9ecLD8YRBqRP0EET7e7NDPKlIWQ1vB5y2hokyL7bxvbGgzqQBAyo9wKJ3v1g4IYEWA9mluvQapOMVEHBYh6wv2nTJpE9EqMxpYQBU1w+vgX0EUgZDEOBkbvd5wubAeERt0mJqjea6vxWJIbeqMVIIoJSZEDaPE5qVNYaosoc8yvAZ9+U3lZlZObHzHEAIUx/2pP/jFEMB8GCSqGSIb3DQEJFDESHhAAaQBuAHMAdABhAG4AYwBlMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE4MTk0NzgwggecBgkqhkiG9w0BBwagggeNMIIHiQIBADCCB4IGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFP43u2ii0k7JTUfInMhUBwjWZrS/AgMAw1CAggdItHB4SBc5KdDVc8eXuF8Ex1WP/Y2wz76PoNNpYm2LeIVozsp5c/2RDN2KqhcvhTihlY44esqWWVCOx+OTwmAPFwzZSrMaOYpcOP3fRWaHJLw98cK8a1ZuNv3eXWecf333TrsvU/bpT3v0KNO915qnSbtNwlvXaOMm6jbw6eBnkB7i6jxA7kgVAW6soa3ZHOrV78quBSbAjXZddHsI8x3MS4rxdvkp6GHet22/fQxjxz8UlQEDqzQgK7F4RqULRJeU//JID7VJqfbHRHfnYsKszsirhWKeJsxLVhG1VU/zRgxs0C35NfQeR/o7jmFpE7CCvvC0Rea2pybNojb51HLvyycXtpGn0gAdTBVNnwK1X58uSDWH7jM61uX9f+/gcDZqlUj6UVc6mzqxAgzDtf6B32G0VQq2szaJjbRVEVXhCAOIdVj6pRpI3l3gRv8OkNAWsGwYDMjeFxnrEpw1AQkEj7FRgI6iNOxEfUhOVYIEsflGTUdcd+K+zlCCHAJoMzbqiwPyHHgvLOp04A7fog+H3/cn6Tdmrp/J7TxpaW1ZwwcHtTRLoq0F77Sj8XJule3CzaDtg6IBen/Yo7H9hhK3ORodlGjJYA285dHAd1mtqmHmoWeDNoVrlVyymge78yXGmlFsBWF83VUChRx+9noF3Zhz+QMPBNsKHk4TM9yRHiWpMZIdkEZKq+obCPU2PmC21wnWx13nhb88gaNyBjHxFsGE91SgEyQh/cPhi01Y7+yNYQvYOXJe3EQ6oqFCBkPUnrbAMiHDP//AVN/tUrgVbmpIclfFprP2YIRcfGa7qch48RFbmhnX5N/OYLaPnNYdbxOiwZ0f/KIpDKWS67kS2N+jDKWs/SCLs2g89q1z2EGvbVwKMD6Vl559EZxAfNRv+eZu0MvTejEkuykIHJpXCyP+8EphUyWW9Cqll1ux4rXMUDkgl5sh1WgSoIEASX2j5TJ3fIh0nBkjAkBi0n2BINZgVWKj9U1zHNdRF67Eb+97lUuY6JIkbFhLSgZiIZqnI9bnW8OKUJFtvVtlSKG4xqdOeAroB8GLw2iR/GjF2Dvy4rIZo+qeTCIN+bm+iFkCri7L2K0/KR25h7bAtXwBxwMct5F4A1vltlLs408efMRJ7dg3iqMGhRyXdwxKexWJLbp02uJQVU9/ogYeLfSiIZEm25qjEMQZqRpQpwLaH5JB9oLKqdLEdeuxOfqb6weHDOtITlFHToeRNzIEmbiT9gbdpMwKTxs/rtwMHgGU6kIJmIFgnw2gauKvpiIuDCY79JpSNipsicvvLTIa4cc8sZCCllZ1wAmbNDsCH6p0bh8CooMjGf2vUbRClSe9+R19/lRMFGSp4N6fElW7MxNw85xpkFjG0s053fvIJmfPhxVqUHMP3fFQv0DUvvQNvNTsRGdDjohkC0095v9EWy7n9Frv2wIM2G7uVHvrlgkQfPK2JsYZKsUE0KXa4HUQptWL71kp7RQSmOmXFzsthjYVXu/pfXA+u+PAtHvQpo1nTPreXn3UZqiEiQmNkmMPLAYzpIi35tjNewfw5XwDj77pqH5OFcMZDTKbiInV1LuvFlKxCEYh4gvTThC0
XTsrsiHgldtNcw9ZB017uPW9AAqbj2IB0d5b0ZB3yMZ67uzt1pretcxmEfSoA64QWOC9lBYp4DVE9QxcCnsSgibWreqpdJHmX5MR4umwIb6WaM1pJdCY1bW4tO3ZVT4DA/4ry7jqxUH4AcZRNK0zYR6DAtZndB7LTJhT+8d5EBtmAHzC5HT9KLmHV6mAG1QLMlwhNXmtM0YCJsKxcZo+xLBy/2cHl41EU4ACiuEq1JrM5j9fQk+hmJHT+JB0aqv+kvdxGmgBuVWGHQBtNTV6TYeLzqzDpIl9uXi3qFKFBuTQOska2zAMv7gLOe79w1cVb/SJKdcYjWtLR0v6wfaRgVeBwLvTvh7nNXhXRqKfQKe3e2Tjgq4nV4kOQHI21WDKGSd4ONyyvXGMwNzRgcZwpDFAcvshZATwaBtAo4JWi6D3vJB6H1PHRtyqHjErKkPazoZMjR2sZI8S4BMo4R5fa1ZztZO4p2lJYUIAQHj872UdGXHTXgyZKU8t/ifiVfxon5UtZJRi0Xq5OMdN//Qtq2kVwQxntf0eWsygkKMtNr1XLzu0TAMUMItnohdQWUw5w8UeXYOAYfZFqZEhKfcwkJsfq1q56ptzVBI3T2hDFM7xuVFNn5y+FCTx9pB9FCbln/3ZlKuUiTH/eLMKdQYGkRX4X0qzkx3YqAn6jDLQPEG3Rz0JP53T43uLxGpqa8+jn1XIUCNj50mqZGiah7bdo1qsDHbFWYCe7uoOjPapontpaoEQaZog1INqBNerS19a+i4S0/uAsGApykwUhk/zGfr9UudpKJWd7AznlF3+yfZfk/9mCSajBpoWafCIWmOvxJD77L86YAs9STuhWUGQvL2rxPf2uyS4WAi2+DgbdrGTSiwNB/1YX8iHp/cw6DA+MCEwCQYFKw4DAhoFAAQUSvLiFrAQlmfgL3Cewez5Fw2+0okEFH+RyXvcJHVaYbaqjejrXkgUS0JsAgMBhqA= + elastic-stack-ca.p12: 
MIIJ2wIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCBWEGCSqGSIb3DQEHAaCCBVIEggVOMIIFSjCCBUYGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBTQSr5nf5M77CSAHwj38PF//hiFVgIDAMNQBIIEyBrOipz1FxDRF9VG/4bMmue7Dt+Qm37ySQ/ZfV3hFTg6xwjEcHje6hvhzQtFeWppCvd4+7U/MG8G5xL0vfV5GzX1RhVlpgYRfClqMZo3URqBNu6Y5t3sum+X37zbXQ1GI6wo3YURStZkDHlVtObZB667qqj5rO4fIajzRalaxTFda8aS2xAmQklMcCEXASsO5j0+ufVKiOiG2SIEV2LjjYlUymP7d9+LAZ2I6vR+k/jo2oNoPeq0v68qFd9aOB2ojI9Q/PDFA7Nj1kKMK7KjpxGN5/Ocfr8qrxF1mviA6rPdl8GV3WCFMFKcJER4fRmskWGNE/AdwU3laXvJux/qz4rjiYoJX+5rSyXBDxdznaFiSyN1LYkFJ+nao6HSAmPPyfEPVPRICc6XHMUM4BZOVlJO49M1xg7NFQUtkyVm8+ooDwXCiGEUHDZNw+hCcuUewp0ZXki695D0tESnzi3BE56w7CRySeaNR8psAtL74IUtov9I66GlBEI7HSbyLTT9Fa7+o+ElJWnFqIyW8WzNF3T5fvRv2LfKjYO5KiISlOM03KlETWE1F60TZqW3EbP9WjLhRnovFcJVsNyha+wDVTu44DAylMX4Oh2xKYm2YW+Oi0aeCFmJbDp/TlxYhm5ACYUxma6CVxbEgHkxwjWyFfiNQp2MBL/5HFJGxuny2lVnN8yUSCvDdnOlVTB36/EByY/oA8S+GF/QRYd3PMew56s7aBgPt8mhncN5Cdm+GCD/Nb/ibcuTId9HAaT6o3wMsc7bYusjHGCjFbz9fEdU2MdpLJO+FXVM9E1sEKoTpPLeJDh2a9RUWJQPUCLu8MgEdiJohtEpOtvM7y5+XbuAkYaDsBw3ym5M/kwovN09X1m5x5qM0QSRIVKHf1qo6wo68VMeVQDEBNxJ5/tuZ11qE3siGRfwDnUkCpb9H54+w3zaScPHGAdwplYYwaqnFMwi8nFMtjZvGOLT2wqPLPnKVeQGt4TCVWPXuB4kYnmbTWoJbUT5Wpurcnyn8l6uzLmypCD4k8YiQoDb1b9HIFUAypn580KIUF19eCSGeIHl4hbmusuISxQ1qXk7Ijbj7PiVtMKy5h8rG/c57KJvfvnMQy9hauM5kcZmlTUvrHDw+7cUFB96/wXbvqmcPKGKutgXRqHcTYyBOPEJnSUMBIM2r59wgFjlMuQLrJurzwzox/IEKu/KMilIBDp4k+MHz6NrINWfbV7xa6yAja1kWyvUmwYjCHhlXZmhCb2fmhP1lsnN4BNAkDsdfxHBRCBISy6fuHSY+c4RsokxZ4RomHhVvJsEY/AE4DCvVXDunY8t4ARrQCqXYso3+kVjm6+aelKk+KgyLZ3St0eAIl/Y2xqEXgh0wHGrx3CLZqGqq864f5MmrxiytmlSzHP4RSad20drsN3VchaJZkyrGbKEs6ZJDU2dq5NiC5unqx5tLw6XNRTydIC2PaiVl9m3GLUCh6hQSRJnvcXrqOd8a9K1uV5OoA3TRdc2V5lyxWRIJsdK5KfiAiTsNeM+Tt+Dh2pZjt2l2h4n4BjgYApxG8u10BP1iZ1e1OsCRgLGbgiuXtXrlrjwvJzrB5i11oy9mt3vqgtbjAciQpsQYGGfnVqyGXfEc55hIYWClNAFZDE4MBMGCSqGSIb3DQEJFDEGHgQAYwBhMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2NTM5NzE3OTU1MTUwggQUBgkqhkiG9w0BBwagggQFMIIEAQIBADCCA/oGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFEVjuzIvhFF9BzWGr3Ee4cw/mLcqAgMAw1CA
ggPAwroH+zLRt2Jtb8IWeOaIbXAv4sVGUljreWkJE8dkoXNcEQpATEt5H7L4uwnDsevLi1yfWtUDN1OxM8gb7iR4Jysrd+8uM1r0nn9YStz/I3qhN9Fb6yAb+ENTCzwo/oAnyDBM/lXR9fL0EPHRfsDmK+6kC+hZ4AZIao+1oWRD0Bu970yK6gwv7TIRCsS/RBZfC/d4Slz1+IQChiWS4ttTzxK/IuhaFbia0JYtUpjmMGMBQwYRyvITgYpOIct39Il/mabQ4BA1/wk7Oecfe3RHzIfM49AxJtwKppfVfaRJjtK1aoO/GKS6CZuvIIX8q3Mt32OEaoRN9FJM9EkUkKCcYhtRfq0/8MTO97MbrcKeO8XICn8vZwOMM7k7IFtCq44/3QBXa9fpc2BFMVYOoQ22W2ZuMNMRp6OYc6Da1BG4Ik9mt1T4k9NkvfrhpNceR27v6Q0pZNUTN26aPr11/SfS/IZmLGXF7cGAfxITMOQwK2ig6qivXzvwLxfnyW4aHF7K/jL59kDg9Vf9zKmlvPJpHSEWv53U9SFYvvrMISd6E8np0bHRM5p49mgH/KXGauRRaLWUxlBwrhjeZRimTF9x//a0luGf5tIW8ymi32wn8LNiu7fbnkldnivfgWVmktNrPMH+70HNlCWkfaNibSHpzyDQRTzg9PjHEcFH+pQAXCc+A8y8FSvlT+nx9dpXXRK5pqbrGnWyrm5D3oY1ceO0E85R9Fx4Ss0f+mMBtNDYpz7zS5BSX36MNn0gm6MkhlOVbbcAob4WbZAEM7zaiV1ilLegXPZYPCGQydN02Q+lJ7HHZ18T4mzTrjF6M1PFIx31cR1r0ZtJhkCrOWdlTrmovvYYEgEStsiE3pi6dW4v1NgcJVevpnJJ//vpGXasH9Ue/ZNdk1tj/h7cQ/qbKlmvrcuH/UQ969RsNX+K3B1xeYnfbV88BXqFLuqhuWy38wwvBvKO37vq+ioPNIjwaIyCVzoF9/MAx2aNOdk/x04mSNVYh5q0ZKv+3JC3W2vJxV2aonc/ybFgi2GZz2erVYNZTSXz+bEefx8QWzcW6/zr437jh/peQRyQ92PsN+eZV9GB2lrwmF7K2579vNQoVcpzTvTFf+eZZhF8u/1HZW4uFHRUyqE3rHyOukSFukD7XWnFL1yUcWw/SGNIm1HNZD3nXjqcwdAIXl7OvqdO0z/Qt2bny6KpOSJqjMUjB5AX5/yt2xlZBDhlsoGtRfbSWefGf7qTdpg2T9+ClMb7vS1dLzrGRzNgGc7KO2IQdkNcfj+1MD4wITAJBgUrDgMCGgUABBSoZ3hv7XnZag72Gq3IDQUfHtup5gQUHZH4AQTUUCeOS0WnPOdFYNvm1KUCAwGGoA== + http.p12: 
MIINZwIBAzCCDSAGCSqGSIb3DQEHAaCCDREEgg0NMIINCTCCBWUGCSqGSIb3DQEHAaCCBVYEggVSMIIFTjCCBUoGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBRl7KAO2Y5ZolA3Si0i+pNdXpn42AIDAMNQBIIEyE9fBFRMMy358/KJQcAD9Ts0Xs0TR0UEl/an+IaNTz/9doU6Es6P22roJUK8j4l09I8ptGGKYdeGzrVBzWEjPhGAZ3EXZPHi2Sr/QKbaiWUnYvqqbPVoWNLukrPvK5NpEyPO2ulfxXN46wHzQMnk5l+BjR4wzqKquxgSzacXRJCqznVj59shjLoTK9FtJ3KVEl+JfukcAh/3EqkP7PRAXrPeQ5UcvYbYMZgxw8xHYg/sdKqyHBxwQqNtvGlfGHQ6jyb4/CS2vu0ZehGHQoMgmry2pvNMjA9ypSVWRGspcrdcQOJNgYtHmBiBScoURLB+9KJX2ivY8zJFI5e8Hb48sLASkp4HQemBWMQTukSnlgddsAtIKgpoRZWpcJ7PunHuWXAKZPCMH6uF14G71/lhluRjjy5GEnkKhKkKnlX15kmLmylTZJVdMbMRnsGK7exsVS8ot7sYJ9EMIvKJUqKf/RmZvUxZqlGp1oy3Uo5JgBU5MF61wnkad+L1UJsB2ZzPV0S/jYKPFVzBsWXj9IH74D02TcQz774+FQqAXlVLlpglmlnMwOU3IboKOH2Z4LIj7Kx7wfZZMi3/sQbYJM2PWCd8OS/keDf53ZwMKNxWPh1ZB7kX4mqhmMHdNgRblcWXP3LtWKck31Vq1UdGfK4/T/nudD1ve15NPUP1DvcVsDOWnRF4s3IDXZwXWqvag+hz0zVyB/T0X1XkqrPtBNX/o5qeTDP30W2GVdGL6SIlgZHaqqNuamHlhGra43ExKTwRPBsskTrziC2fb/JeqXxJBES/YufiomXw14BnQUpyBfVeV3cDDEZUnfu7lJz19jS+2aTtA6v9Qnps+q0rNnLa54JLf9bWlw4RomSWcJCqkkW/EG0AdTKrqNFYPZVZTLvt+4B8ehWrUWas8MK5jAXeTklr0ao5acGOKWip1wmqIRKRAIT2OBbs9jCmigb2xJNDK4RdUtDYsJeltJ69DvnG7bmTLjfsOQcVIaI40k91N8nnda9+/6BdKFDQtMDB6efGkciWp9ce24uGUzKszD7CmKTlCJiqn/V2bbOKGdk4Tafy4B2HzeaX+fMFjpWu01UMaJJrvYbAnXww1Yg2IjbwdAMTv7z8zPIJ0a+drouylUfvKKeun6BnLe0fR+XbRRs77Rengb30c1plozEFHZjzmQ10uVQSh1wWURJnVSru6b1pyVI+KR3WZHB4vgDx+BDlQjxCk53+Hxm5wv8SgpvNxVkepPVF8ucut9FkGNHov1gyatlEKSzYlrFt0mFQWg20rKMrkB6pEDO8f5W2InR3znO15NTbw/l3BXYGOe1lS0tHljc5zJkmMTdVrJnFEd2RqNPNmFWEn+1bm4NeAr6QEY9fiyBCMWBHEELTfHtu4iS37D1cBEKudpCszaWJiPgEeDu75+IuXa/guZdxWJj/ktDfZQJpp9ork2QScgu31l7QdGfC24C2E6kQp4UHZ3k7wXSTUt61bdmK7BHqjiz3HuP76phzd7nZxwLCpEg8fhtwhNgPx3IrU1B4JX40Wzsy1Tz/8oIcvjykDmI967chWtw/WSschamGBelNt+TV1gVKoLlMpL9QxFcAqXhEC6Nr9nXRZRJAIRun3Vj+EabZoR2YsdghDE9boTE8MBcGCSqGSIb3DQEJFDEKHggAaAB0AHQAcDAhBgkqhkiG9w0BCRUxFAQSVGltZSAxNjUzOTcyMDczODY4MIIHnAYJKoZIhvcNAQcGoIIHjTCCB4kCAQAwggeCBgkqhkiG9w0BBwEwKQYKKoZIhvcNAQwBBjAbBBRmhTM5a6OsdDd4LLR/07U/28/dqgID
AMNQgIIHSCCLUDdxl9rcX65CAYiQD1mrnoDJe+c8hWww8KI+RD1/3U8skUZ+NHjf2cjCrDQdtVZcycc37lkJ4HEU0keMdVE7I9tja81EfQclnZAUgx/zzLQqVV9qc1AcKX0pzUczLewoQZdXQHdpXh0u8Hf4xFeYM3EAGxB0mUYGwZXWSxYSdaHmxTgeftqNHF6tudt0vpPgq9Rbqp7zP8z48VUOSUkbNTXZOgNVpMgs/yKivvURdWBwJMkpOs/daeR+QbOLkhrhTtT8FjwFUlpnQ//8i7UsBBJKcEKvlrfBEDWcIGw8M6oAssoPsCGyXnsP7ZCVBDBgv941mBTJ9Z9vMoKPpr9jZzSVJrU2+DDuxkfSy1KL0vUvZm5PGSiZA72OpRZkNi8ZUbJTRKf71R+hsCtX/ZUQtMlGCX50XUEQl44cvyX32XQb2VlyGvWu0rqgEVS+QZbuWJoZBZAedhzHvnfGiIsnn2PhRyKBvALyGcWAgK0XvC26WF676g2oMk8sjBrp8saPDvMXj06XmD6746i5KC52gLiRAcwlT4zJoA0OB5jYgxXv+/GP9iXNIK578cCGpBes28b7R+hLDBCc/fMv1jMhKWPVXWJZ6VkcpUgH73uxFl43guTZzJfHI1kMF1+PbOviWPdlSj1D44ajloMJP5FXubIfYEIqV19BdU42ZXZ8ISIZYTAj9OhNCUkkTjjGH2VhFz/FjZDxdk9m/Sw+du8dg1v0+6XIMScjuutbLxxol8Dx1yfRSgZZGN+D3vi0hW1OgcpnUhVI/x48LjdWm1IA0XWOzFiJAe98BiL0roTsUk0pgyujzvLcwDFGP9hnQ0YLdCy22UsQ39hRyQzwGAVO8O49bU8sgNy75+4++8Z3pqI91hdoHyzNMSx6fJn/Qd6UcAdTF0divh17q5bZi+x3D7AQEvh5NwePD0HIqBZexT0yNTVTHragJZUetI5FZgE1cZrfchckP/Ub5jdn3e/Cvu8J/yZFAM8glJvO1D+4BZ+/MVAw3AkO7kLhGeXMXr9s9+A/uPlznoC6b9bpjj3X46bFz7dPIYC0aeya87vISA0/5VPkkUZ+U6A9nLkCIcl5XQElMjrzidFJyBmtxHXLrAu5yiWorl3KVOf9QOrKrZt1UrNihIaSIq/46jI5yBQX6LV7fUBrZKe/oMbuf6W0LliNJbKSwZi0RRHo0jBPotUiOsn1qmnh+hZp6rwi1KGOsCAPSMSGnURwoXAdTUmAyPriDjDBKjm2EiDZJ9T3XgNDHVU24SqKjsSoByrD4FcVyqFAl3w0CaSNXloZswE0UqGKoQUy6Up0ceWoeHYfA/FJyaGfkFGRkmYun+wUJZvhpoLv6bn377CziWTSc0o3nl+UZ4pTsRJOlG0FOxzWApjSd8bPIdezPxak2DM0qj6aiUocfEBMLnFn4Sjj1vVFmIGPNXiOPlJF0Ef99I5Gno3YAd4ZHBqpkeUq7+bWur+xhv5zsXs5ARK6TVOVqlMPiKRpDX7lEQoya++U6HIj6zb7arSZivM5YrZeqHFKK4gpORvpg6icApQCBniDgmNxZJFobgzvIwKTABJjoivHs4zIIw6TCjbz38GEFdzbsUuCXQo3tFWaxgiGkxtLnjYr0PTIxFdBfQ5dkRkkxLvUg7uR1uP9IcmO/8QzzyLeSA+I+teZME8QCzui6CY/lhIfjxJimawejCJx33nS9uXNibQ0my41SmXRDGVgiH6el8veIbEHU9RY+elVR6eqlemCuIHfU8QNPNbe7Gzqaaoccd2VUY3PXNHxU87DC7Nttvn99Ow5zxZ8xZUQVfLFntS9d2hgKp8gJ9lgVKzEuYCiL59wuxbNtnAb8mET0Buw24JeQew9e8DdYL2vDLhQz+IqPXKAhlf7BSpPyQTOeaba657CNmkzdiNk3RHGeTRrq4c3/nl1M+ZsPwf8WxoTcmu+W0Y7/j9nps8r+fKlNB23hOEIWZ4KN+Y4qZRKltTARhqmdjLI
hUtWh4D49eTe5sS3MqzsZJJwsEHPPOvZKvOG5UU3jXMg9R4F8CaYgx/M4ClwIIlHvcdW7R7sXke9E/qccIG3jQ5b/mgHCk3pVkAyrRWfBZqXxlfWn+cfzVALtUXWePwhN8+i3CQbjLLOgE6yH3/rBfXQQVYHwrZqoyFchDwlFF5FtF5GThnj04kvhZbq0EcF4lbiULAOiBkJong4Op287QYgq4W8szOn9F2m/4M2XNaI3X7w67GADFHs5TtPXjWx1l6kKIwMM2pcpltXblqgH087payQHx1LnCpztxcxmeoFb3owvwKWmQpV0Gh6CIKfa7hqwCsNggOcKEQWwRJtADEXzPhRYG0mPelWLQMdLLaEzUqh9HElXu3awKazlHa1HkV0nywgldm23DPCKj5Fi6hux7vl7vt8K0Q4KA8Xoys4Pw43eRi9puQM3jOJgxX8Q/MsABHHxPBa94bOsRLFUa/Td70xbHpOrCCp64M7cm6kDKAwPjAhMAkGBSsOAwIaBQAEFEi1rtKgyohIpB9yF4t2L1CpwF+ABBSDiyukmk2pIV5XfqW5AtbEC9LvtQIDAYag +kind: Secret +metadata: + creationTimestamp: null + name: es-cert + namespace: imxc diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml new file mode 100644 index 0000000..d2bff8e --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/templates/needtocheck_storageclass.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer + diff --git a/roles/cmoa_install/files/02-base/base/charts/elasticsearch/values.yaml b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/values.yaml new file mode 100644 index 0000000..7b0bd6d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/elasticsearch/values.yaml @@ -0,0 +1,68 @@ +# Default values for sample. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka-manager/.helmignore b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka-manager/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/Chart.yaml new file mode 100644 index 0000000..61a7b7f --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka-manager +version: 0.1.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml new file mode 100644 index 0000000..b20900d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/0.kafka-manager-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-manager + namespace: imxc +spec: + type: NodePort + ports: + - protocol: TCP + port: 80 + nodePort : 32090 + targetPort: 80 + selector: + app: kafka-manager diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml new file mode 100644 index 0000000..4edcf32 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/templates/1.kafka-manager.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-manager + namespace: imxc +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-manager + template: + metadata: + labels: + app: kafka-manager + spec: + containers: + - name: kafka-manager + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-manager:{{ .Values.global.KAFKA_MANAGER_VERSION }} + resources: + requests: + cpu: 100m + memory: 500Mi 
+ limits: + cpu: 200m + memory: 1000Mi + ports: + - containerPort: 80 + env: + - name: ZK_HOSTS + value: zookeeper:2181 + command: + - ./bin/kafka-manager + - -Dhttp.port=80 diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka-manager/values.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/values.yaml new file mode 100644 index 0000000..b5532cd --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka-manager/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka-manager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka/.helmignore b/roles/cmoa_install/files/02-base/base/charts/kafka/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka/1.broker-config.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka/1.broker-config.yaml new file mode 100644 index 0000000..ddf76e1 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka/1.broker-config.yaml @@ -0,0 +1,161 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + cp /etc/kafka-configmap/log4j.properties /etc/kafka/ + KAFKA_BROKER_ID=${HOSTNAME##*-} + SEDS=("s/#init#broker.id=#init#/broker.id=$KAFKA_BROKER_ID/") + LABELS="kafka-broker-id=$KAFKA_BROKER_ID" + ANNOTATIONS="" + hash kubectl 2>/dev/null || { + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# kubectl not found in path/") + } && { + ZONE=$(kubectl get node "$NODE_NAME" -o=go-template='{{index .metadata.labels "failure-domain.beta.kubernetes.io/zone"}}') + if [ $? 
-ne 0 ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone lookup failed, see -c init-config logs/") + elif [ "x$ZONE" == "x" ]; then + SEDS+=("s/#init#broker.rack=#init#/#init#broker.rack=# zone label not found for node $NODE_NAME/") + else + SEDS+=("s/#init#broker.rack=#init#/broker.rack=$ZONE/") + LABELS="$LABELS kafka-broker-rack=$ZONE" + fi + # Node Port 설정 주석처리 + # OUTSIDE_HOST=$(kubectl get node "$NODE_NAME" -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') + OUTSIDE_HOST=kafka-outside-${KAFKA_BROKER_ID} + GLOBAL_HOST=kafka-global-${KAFKA_BROKER_ID} + if [ $? -ne 0 ]; then + echo "Outside (i.e. cluster-external access) host lookup command failed" + else + OUTSIDE_PORT=3240${KAFKA_BROKER_ID} + GLOBAL_PORT=3250${KAFKA_BROKER_ID} + # datagate 도입했으므로 Kube DNS 기반 통신 + SEDS+=("s|#init#advertised.listeners=OUTSIDE://#init#|advertised.listeners=OUTSIDE://${OUTSIDE_HOST}:${OUTSIDE_PORT},GLOBAL://${GLOBAL_HOST}:${GLOBAL_PORT}|") + ANNOTATIONS="$ANNOTATIONS kafka-listener-outside-host=$OUTSIDE_HOST kafka-listener-outside-port=$OUTSIDE_PORT" + fi + if [ ! -z "$LABELS" ]; then + kubectl -n $POD_NAMESPACE label pod $POD_NAME $LABELS || echo "Failed to label $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + if [ ! -z "$ANNOTATIONS" ]; then + kubectl -n $POD_NAMESPACE annotate pod $POD_NAME $ANNOTATIONS || echo "Failed to annotate $POD_NAMESPACE.$POD_NAME - RBAC issue?" + fi + } + printf '%s\n' "${SEDS[@]}" | sed -f - /etc/kafka-configmap/server.properties > /etc/kafka/server.properties.tmp + [ $? 
-eq 0 ] && mv /etc/kafka/server.properties.tmp /etc/kafka/server.properties + server.properties: |- + log.dirs=/var/lib/kafka/data/topics + ############################# Zookeeper ############################# + zookeeper.connect=zookeeper:2181 + #zookeeper.connection.timeout.ms=6000 + ############################# Group Coordinator Settings ############################# + #group.initial.rebalance.delay.ms=0 + ############################# Thread ############################# + #background.threads=10 + #num.recovery.threads.per.data.dir=1 + ############################# Topic ############################# + auto.create.topics.enable=true + delete.topic.enable=true + default.replication.factor=2 + ############################# Msg Replication ############################# + min.insync.replicas=1 + num.io.threads=10 + num.network.threads=4 + num.replica.fetchers=4 + replica.fetch.min.bytes=1 + socket.receive.buffer.bytes=1048576 + socket.send.buffer.bytes=1048576 + replica.socket.receive.buffer.bytes=1048576 + socket.request.max.bytes=204857600 + ############################# Partition ############################# + #auto.leader.rebalance.enable=true + num.partitions=12 + ############################# Log size ############################# + message.max.bytes=204857600 + max.message.bytes=204857600 + ############################# Log Flush Policy ############################# + #log.flush.interval.messages=10000 + #log.flush.interval.ms=1000 + ############################# Log Retention Policy ############################# + log.retention.minutes=1 + offsets.retention.minutes=1440 + #log.retention.bytes=1073741824 + #log.segment.bytes=1073741824 + log.retention.check.interval.ms=10000 + ############################# Internal Topic Settings ############################# + offsets.topic.replication.factor=1 + #transaction.state.log.replication.factor=1 + #transaction.state.log.min.isr=1 + ############################# ETC ############################# + 
listeners=OUTSIDE://:9094,PLAINTEXT://:9092,GLOBAL://:9095 + listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,OUTSIDE:PLAINTEXT,GLOBAL:PLAINTEXT + #listeners=PLAINTEXT://:9092 + inter.broker.listener.name=PLAINTEXT + #init#broker.id=#init# + #init#broker.rack=#init# + log4j.properties: |- + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + 
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + # Change the two lines below to adjust ZK client logging + log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + 
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka/Chart.yaml new file mode 100644 index 0000000..9565567 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka +version: 0.1.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka/templates/2.dns.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/2.dns.yaml new file mode 100644 index 0000000..8ffb3f8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/2.dns.yaml @@ -0,0 +1,14 @@ +# A headless service to create DNS records +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + namespace: imxc +spec: + ports: + - port: 9092 + clusterIP: None + selector: + app: kafka +--- diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml new file mode 100644 index 0000000..1cd7406 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/3.bootstrap-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: +# name: bootstrap + name: kafka + namespace: imxc +spec: + ports: + - port: 9092 + selector: + app: kafka diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml new file mode 
100644 index 0000000..6f67ab4 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/4.persistent-volume.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-1 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: data-kafka-cluster-2 + labels: + type: local + app: kafka +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_KAFKA_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: kafka-broker + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +# On-prem/워커노드 두개/브로커 두개 환경에서 발생할 수 있는 affinity 충돌때문에 주석처리 +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: data-kafka-cluster-3 +# labels: +# type: local +# app: kafka +#spec: +# capacity: +# storage: 30Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: {{ .Values.global.IMXC_KAFKA_PV_PATH3 }} +# persistentVolumeReclaimPolicy: Retain +# storageClassName: kafka-broker +# nodeAffinity: +# required: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/hostname +# operator: In +# values: + # - {{ .Values.global.IMXC_KAFKA_HOST3 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml new file mode 100644 index 0000000..1982584 --- /dev/null +++ 
b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/5.kafka.yaml @@ -0,0 +1,132 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + namespace: imxc +spec: + selector: + matchLabels: + app: kafka + serviceName: "kafka-headless" + replicas: 2 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: kafka + annotations: + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kafka + topologyKey: "kubernetes.io/hostname" + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - zookeeper + topologyKey: "kubernetes.io/hostname" + terminationGracePeriodSeconds: 30 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: extensions + mountPath: /opt/kafka/libs/extensions + containers: + - name: broker + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 6000Mi + limits: + # This limit was intentionally set low as a reminder that + # the entire Yolean/kubernetes-kafka is meant to be tweaked + # before you run production workloads + cpu: 500m + memory: 10000Mi + env: + - name: CLASSPATH + value: /opt/kafka/libs/extensions/* + - name: KAFKA_LOG4J_OPTS + value: 
-Dlog4j.configuration=file:/etc/kafka/log4j.properties + - name: JMX_PORT + value: "5555" + - name: KAFKA_OPTS + value: -javaagent:/opt/kafka/jmx_prometheus_javaagent-0.15.0.jar=9010:/opt/kafka/config.yaml + ports: + - name: inside + containerPort: 9092 + - name: outside + containerPort: 9094 + - name: global + containerPort: 9095 + - name: jmx + containerPort: 9010 + command: + - ./bin/kafka-server-start.sh + - /etc/kafka/server.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "rm -rf /var/lib/kafka/data/*;kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] +# readinessProbe: +# tcpSocket: +# port: 9092 +# timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/kafka/data + - name: extensions + mountPath: /opt/kafka/libs/extensions + volumes: + - name: configmap + configMap: + name: broker-config + - name: config + emptyDir: {} + - name: extensions + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: kafka-broker + resources: + requests: + storage: 30Gi diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka/templates/6.outside.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/6.outside.yaml new file mode 100644 index 0000000..c2d8170 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka/templates/6.outside.yaml @@ -0,0 +1,89 @@ +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-0 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9094 + port: 32400 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-outside-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9094 + port: 32401 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-0 + namespace: imxc +spec: + 
selector: + app: kafka + kafka-broker-id: "0" + ports: + - protocol: TCP + targetPort: 9095 + port: 32500 + type: ClusterIP +--- +kind: Service +apiVersion: v1 +metadata: + name: kafka-global-1 + namespace: imxc +spec: + selector: + app: kafka + kafka-broker-id: "1" + ports: + - protocol: TCP + targetPort: 9095 + port: 32501 + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9094 + name: kafka + protocol: TCP + targetPort: 9094 + selector: + app: kafka +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker-global + namespace: imxc +spec: + type: ClusterIP + ports: + - port: 9095 + name: kafka + protocol: TCP + targetPort: 9095 + selector: + app: kafka diff --git a/roles/cmoa_install/files/02-base/base/charts/kafka/values.yaml b/roles/cmoa_install/files/02-base/base/charts/kafka/values.yaml new file mode 100644 index 0000000..cb0e677 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/kafka/values.yaml @@ -0,0 +1,68 @@ +# Default values for kafka. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_install/files/02-base/base/charts/postgres/.helmignore b/roles/cmoa_install/files/02-base/base/charts/postgres/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/postgres/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_install/files/02-base/base/charts/postgres/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/postgres/Chart.yaml new file mode 100644 index 0000000..d602e29 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/postgres/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: postgres +version: 0.1.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml b/roles/cmoa_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml new file mode 100644 index 0000000..95c8bda --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/postgres/templates/1.postgres-configmap.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-config + namespace: imxc + labels: + app: postgres +data: + POSTGRES_DB: postgresdb + POSTGRES_USER: admin + POSTGRES_PASSWORD: eorbahrhkswp diff --git a/roles/cmoa_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml b/roles/cmoa_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml new file mode 100644 index 0000000..dfbd714 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/postgres/templates/2.postgres-storage.yaml @@ -0,0 +1,38 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: postgres-pv-volume + labels: + type: local + app: postgres +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: "{{ .Values.global.IMXC_POSTGRES_PV_PATH }}" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +kind: 
PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postgres-pv-claim + namespace: imxc + labels: + app: postgres +spec: + storageClassName: manual + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi diff --git a/roles/cmoa_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml b/roles/cmoa_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml new file mode 100644 index 0000000..31e90a2 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/postgres/templates/3.postgres-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: imxc + labels: + app: postgres +spec: + type: ClusterIP + ports: + - port: 5432 + # nodePort: 5432 + selector: + app: postgres diff --git a/roles/cmoa_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml b/roles/cmoa_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml new file mode 100644 index 0000000..14993e8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/postgres/templates/4.postgres-deployment.yaml @@ -0,0 +1,45 @@ +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Deployment +metadata: + name: postgres + namespace: imxc +spec: +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + selector: + matchLabels: + app: postgres +{{- end }} + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: {{ .Values.global.IMXC_IN_REGISTRY }}/postgres:{{ .Values.global.POSTGRES_VERSION }} + resources: + requests: + cpu: 100m + memory: 2000Mi + limits: + cpu: 300m + memory: 2000Mi + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + args: 
["-c","max_connections=1000","-c","shared_buffers=512MB","-c","deadlock_timeout=5s","-c","statement_timeout=15s","-c","idle_in_transaction_session_timeout=60s"] + envFrom: + - configMapRef: + name: postgres-config + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: postgredb + volumes: + - name: postgredb + persistentVolumeClaim: + claimName: postgres-pv-claim diff --git a/roles/cmoa_install/files/02-base/base/charts/postgres/values.yaml b/roles/cmoa_install/files/02-base/base/charts/postgres/values.yaml new file mode 100644 index 0000000..9972ab8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/postgres/values.yaml @@ -0,0 +1,68 @@ +# Default values for postgres. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/.helmignore b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.lock b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.lock new file mode 100644 index 0000000..21ff14f --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.8.0 +digest: sha256:3e342a25057f87853e52d83e1d14e6d8727c15fd85aaae22e7594489cc129f15 +generated: "2021-08-09T15:49:41.56962208Z" diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.yaml new file mode 100644 index 0000000..3b08f9c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.8.22 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source message broker software that 
implements the Advanced Message + Queuing Protocol (AMQP) +home: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: rabbitmq +sources: +- https://github.com/bitnami/bitnami-docker-rabbitmq +- https://www.rabbitmq.com +version: 8.20.5 diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/README.md b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/README.md new file mode 100644 index 0000000..9b26b09 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/README.md @@ -0,0 +1,566 @@ +# RabbitMQ + +[RabbitMQ](https://www.rabbitmq.com/) is an open source message broker software that implements the Advanced Message Queuing Protocol (AMQP). + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### RabitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------- | ---------------------- | +| `image.registry` | RabbitMQ image registry | `docker.io` | +| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` | +| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.8.21-debian-10-r13` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + + +### Common parameters + +| Name | Description | Value | +| ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| 
`extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `[]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `[]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? | `verify_peer` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. 
| `false` | +| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. 
| `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `advancedConfiguration` | Configuration file content: advanced configuration | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.servers` | List of LDAP servers hostnames | `[]` | +| `ldap.port` | LDAP servers port | `389` | +| `ldap.user_dn_pattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind | `cn=${username},dc=example,dc=org` | +| `ldap.tls.enabled` | If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter | `false` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` | +| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with prepended. | `false` | + + +### Statefulset parameters + +| Name | Description | Value | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` | +| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` | +| `updateStrategyType` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` | +| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `{}` | +| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` | +| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` | +| `containerSecurityContext` | RabbitMQ containers' Security Context | `{}` | +| `resources.limits` | The resources limits for RabbitMQ containers | `{}` | +| `resources.requests` | The requested resources for RabbitMQ containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain 
scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + + +### RBAC parameters + +| Name | Description | Value | +| ----------------------- | --------------------------------------------------- | ------ | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `rbac.create` | Whether RBAC rules should be created | `true` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | ----------------------------------------------- | --------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessMode` | PVC Access Mode for RabbitMQ data volume | `ReadWriteOnce` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.volumes` | Additional volumes without creating PVC | `[]` | + + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
| `true` | +| `service.port` | Amqp port | `5672` | +| `service.portName` | Amqp service port name | `amqp` | +| `service.tlsPort` | Amqp TLS port | `5671` | +| `service.tlsPortName` | Amqp TLS service port name | `amqp-ssl` | +| `service.nodePort` | Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.tlsNodePort` | Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` | `""` | +| `service.distPort` | Erlang distribution server port | `25672` | +| `service.distPortName` | Erlang distribution service port name | `dist` | +| `service.distNodePort` | Node port override for `dist` port, if serviceType is `NodePort` | `""` | +| `service.managerPortEnabled` | RabbitMQ Manager port | `true` | +| `service.managerPort` | RabbitMQ Manager port | `15672` | +| `service.managerPortName` | RabbitMQ Manager service port name | `http-stats` | +| `service.managerNodePort` | Node port override for `http-stats` port, if serviceType `NodePort` | `""` | +| `service.metricsPort` | RabbitMQ Prometheues metrics port | `9419` | +| `service.metricsPortName` | RabbitMQ Prometheues metrics service port name | `metrics` | +| `service.metricsNodePort` | Node port override for `metrics` port, if serviceType is `NodePort` | `""` | +| `service.epmdNodePort` | Node port override for `epmd` port, if serviceType is `NodePort` | `""` | +| `service.epmdPortName` | EPMD Discovery service port name | `epmd` | +| `service.extraPorts` | Extra ports to expose in the service | `[]` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` | +| `service.externalIPs` | Set the ExternalIPs | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` | +| `service.labels` | Service labels. Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. 
Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.certManager` | Set this to true in order to add the corresponding annotations for cert-manager | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
| `[]` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------- | +| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` | +| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` | +| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` | +| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource 
should be created | `""` | +| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| -------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r172` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + + +The above parameters map to the env variables defined in [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \ + bitnami/rabbitmq +``` + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. 
+ +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/rabbitmq +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Set pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. 
+ +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command. + +Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/). + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +* Create a secret with the certificates and associate the secret when deploying the chart +* Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls/). + +### Load custom definitions + +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](http://www.rabbitmq.com/management.html#load-definitions). + +Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). 
Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. + +Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. Any load definitions specified will be available within in the container at `/app`. + +> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values. + +Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/). + +### Configure LDAP support + +LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/). + +### Configure memory high watermark + +It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives: + +* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="absolute" +memoryHighWatermark.value="512MB" +``` + +* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. 
An example configuration is shown below: + +``` +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="relative" +memoryHighWatermark.value="0.4" +resources.limits.memory="2Gi" +``` + +### Add extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use plugins + +The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s. + +To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ. + +Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/). + +### Recover the cluster from complete shutdown + +> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand. + +The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover. + +This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. 
If that happens, update the pod management policy to recover a healthy state: + +```console +$ kubectl delete statefulset STATEFULSET_NAME --cascade=false +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +For a faster resynchronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests. + +If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod): + +```console +$ helm upgrade RELEASE_NAME bitnami/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set clustering.forceBoot=true \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting). + +### Known issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. 
+ +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Use existing PersistentVolumeClaims + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq +``` + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. + +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +### Configure the default user/vhost + +If you want to create default user/vhost and set the default permission, you can use `extraConfiguration`: + +```yaml +auth: + username: default-user +extraConfiguration: |- + default_vhost = default-vhost + default_permissions.configure = .* + default_permissions.read = .* + default_permissions.write = .* +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. 
When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart: + +```bash +$ helm upgrade my-release bitnami/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE] +``` + +| Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes. + +### To 8.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/). + +### To 7.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - `replicas` is renamed to `replicaCount`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Authentication parameters were reorganized under the `auth.*` parameter: + - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively. + - `rabbitmq.tls.*` parameters are now under `auth.tls.*`. + - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`. + - `rabbitmq.rabbitmqClusterNodeName` is deprecated. + - `rabbitmq.setUlimitNofiles` is deprecated. + - `forceBoot.enabled` is renamed to `clustering.forceBoot`. + - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`. 
+ - `metrics.port` is renamed to `service.metricsPort`. + - `service.extraContainerPorts` is renamed to `extraContainerPorts`. + - `service.nodeTlsPort` is renamed to `service.tlsNodePort`. + - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`. + - `rbacEnabled` -> deprecated in favor of `rbac.create`. + - New parameters: `serviceAccount.create`, and `serviceAccount.name`. + - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`. +- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices. +- Initialization logic now relies on the container. +- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +Consequences: + +- Backwards compatibility is not guaranteed. +- Compatibility with non Bitnami images is not guaranteed anymore. + +### To 6.0.0 + +This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y. + +### To 5.0.0 + +This major release changes the clustering method from `ip` to `hostname`. +This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change. + +> IMPORTANT: Note that if you upgrade from a previous version you will lose your data. 
+ +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq: + +```console +$ kubectl delete statefulset rabbitmq --cascade=false +``` + +## Bitnami Kubernetes Documentation + +Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). You can find there the following resources: + +- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/) +- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/) +- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/) +- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/) +- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/) diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml new file mode 100644 index 0000000..344c403 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.8.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.8.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/README.md b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/README.md new file mode 100644 index 0000000..054e51f --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/README.md @@ -0,0 +1,327 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . 
}} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. 
| `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for policy | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` 
Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. 
| `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. 
| +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. 
+ example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. 
[Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
| quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default 
(dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..ae45d5e --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_capabilities.tpl @@ -0,0 +1,117 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for policy. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error. +**To be removed when the catalog's minimum Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Throw an error when upgrading using empty passwords values that must not be empty.
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..f905f20 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_ingress.tpl @@ -0,0 +1,55 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. 
The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..60b84a7 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_secrets.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . 
-}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..1e5bba9 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..18d9813 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (not $existingSecretValue) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml new file mode 100644 index 0000000..de92d88 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/ci/tolerations-values.yaml @@ -0,0 +1,4 @@ +tolerations: + - key: foo + operator: "Equal" + value: bar diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt new file mode 100644 index 0000000..24ffa89 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/NOTES.txt @@ -0,0 +1,167 @@ +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.port .Values.service.tlsPort -}} +{{- $serviceNodePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.nodePort .Values.service.tlsNodePort -}} +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh + +{{- else }} + +Credentials: + +{{- if not .Values.loadDefinition.enabled }} + echo "Username : {{ .Values.auth.username }}" + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)" +{{- end }} + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)" + +Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid. +This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading. +More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases. + +RabbitMQ can be accessed within the cluster on port {{ $serviceNodePort }} at {{ include "rabbitmq.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clustering.k8s_domain }} + +To access from outside the cluster, perform the following steps: + +{{- if .Values.ingress.enabled }} +{{- if contains "NodePort" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Create a port-forward to the AMQP port: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} & + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + +{{- end }} + +2. Access RabbitMQ using the obtained URL. + +To Access the RabbitMQ Management interface: + +1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. 
Use: `kubectl cluster-info` on others K8s clusters + echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/" + echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts + +2. Open a browser and access RabbitMQ Management using the obtained URL. + +{{- else }} +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "rabbitmq.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "rabbitmq.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rabbitmq.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ $servicePort }}:{{ $servicePort }} + +To Access the RabbitMQ Management interface: + + echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }} + +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "rabbitmq.fullname" . }} {{ .Values.service.metricsPort }}:{{ .Values.service.metricsPort }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.metricsPort }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "rabbitmq.validateValues" . -}} + +{{- $requiredPassword := list -}} +{{- $secretNameRabbitmq := include "rabbitmq.secretPasswordName" . 
-}} + +{{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) -}} + {{- $requiredRabbitmqPassword := dict "valueKey" "auth.password" "secret" $secretNameRabbitmq "field" "rabbitmq-password" -}} + {{- $requiredPassword = append $requiredPassword $requiredRabbitmqPassword -}} +{{- end -}} + +{{- if not .Values.auth.existingErlangSecret -}} + {{- $requiredErlangPassword := dict "valueKey" "auth.erlangCookie" "secret" $secretNameRabbitmq "field" "rabbitmq-erlang-cookie" -}} + {{- $requiredPassword = append $requiredPassword $requiredErlangPassword -}} +{{- end -}} + +{{- $requiredRabbitmqPasswordErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPassword "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredRabbitmqPasswordErrors) "context" $) -}} + +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl new file mode 100644 index 0000000..6b46b23 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,247 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "rabbitmq.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rabbitmq.fullname" -}} +{{- include "common.names.fullname" . 
-}} +{{- end -}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "rabbitmq.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "rabbitmq.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. 
+*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "rabbitmq.createTlsSecret" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.plugins -}} +{{- if .Values.extraPlugins -}} +{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}} +{{- end -}} +{{- if .Values.metrics.enabled -}} +{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}} +{{- end -}} +{{- printf "%s" $plugins | replace " " ", " -}} +{{- end -}} + +{{/* +Return the number of bytes given a value +following a base 2 o base 10 number system. +Usage: +{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }} +*/}} +{{- define "rabbitmq.toBytes" -}} +{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }} +{{- $unit := regexReplaceAll "[0-9]+(.*)" . 
"${1}" }} +{{- if eq $unit "Ki" }} + {{- mul $value 1024 }} +{{- else if eq $unit "Mi" }} + {{- mul $value 1024 1024 }} +{{- else if eq $unit "Gi" }} + {{- mul $value 1024 1024 1024 }} +{{- else if eq $unit "Ti" }} + {{- mul $value 1024 1024 1024 1024 }} +{{- else if eq $unit "Pi" }} + {{- mul $value 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "Ei" }} + {{- mul $value 1024 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "K" }} + {{- mul $value 1000 }} +{{- else if eq $unit "M" }} + {{- mul $value 1000 1000 }} +{{- else if eq $unit "G" }} + {{- mul $value 1000 1000 1000 }} +{{- else if eq $unit "T" }} + {{- mul $value 1000 1000 1000 1000 }} +{{- else if eq $unit "P" }} + {{- mul $value 1000 1000 1000 1000 1000 }} +{{- else if eq $unit "E" }} + {{- mul $value 1000 1000 1000 1000 1000 1000 }} +{{- end }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- $serversListLength := len .Values.ldap.servers }} +{{- if or (not (gt $serversListLength 0)) (not (and .Values.ldap.port .Values.ldap.user_dn_pattern)) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers", + "ldap.port", and "ldap.user_dn_pattern" are mandatory. 
Please provide them: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.servers[0]="my-ldap-server" \ + --set ldap.port="389" \ + --set ldap.user_dn_pattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - Memory high watermark +*/}} +{{- define "rabbitmq.validateValues.memoryHighWatermark" -}} +{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }} +rabbitmq: memoryHighWatermark.type + Invalid Memory high watermark type. Valid values are "absolute" and + "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx") +{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }} +rabbitmq: memoryHighWatermark + You enabled configuring memory high watermark using a relative limit. However, + no memory limits were defined at POD level. 
Define your POD limits as shown below: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="relative" \ + --set memoryHighWatermark.value="0.4" \ + --set resources.limits.memory="2Gi" + + Alternatively, use an absolute value for the memory high watermark: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="absolute" \ + --set memoryHighWatermark.value="512MB" +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - TLS configuration for Ingress +*/}} +{{- define "rabbitmq.validateValues.ingress.tls" -}} +{{- if and .Values.ingress.enabled .Values.ingress.tls (not .Values.ingress.certManager) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }} +rabbitmq: ingress.tls + You enabled the TLS configuration for the default ingress hostname but + you did not enable any of the available mechanisms to create the TLS secret + to be used by the Ingress Controller. + Please use any of these alternatives: + - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates. + - Rely on cert-manager to create it by setting `ingress.certManager=true` + - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` +{{- end -}} +{{- end -}} + +{{/* +Validate values of RabbitMQ - Auth TLS enabled +*/}} +{{- define "rabbitmq.validateValues.auth.tls" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }} +rabbitmq: auth.tls + You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret. 
+ Please use any of these alternatives: + - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret` + - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`. + - Enable auto-generated certificates using `auth.tls.autoGenerated`. +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml new file mode 100644 index 0000000..5ba6b72 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/configuration.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-config + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} + {{- if .Values.advancedConfiguration}} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | nindent 4 }} + {{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml new file mode 100644 index 0000000..db74e50 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/ingress.yaml @@ -0,0 +1,57 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" .Values.service.managerPortName "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http-stats" "context" $) | nindent 14 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 0000000..158aeaa --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: 4369 # EPMD + - port: {{ .Values.service.port }} + - port: {{ .Values.service.tlsPort }} + - port: {{ .Values.service.distPort }} + - port: {{ .Values.service.managerPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "rabbitmq.fullname" . }}-client: "true" + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.service.metricsPort }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml new file mode 100644 index 0000000..bf06b66 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.pdb.create }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 0000000..a1ba629 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "rabbitmq.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . 
"context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml new file mode 100644 index 0000000..d0f8bdd --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pv.yaml @@ -0,0 +1,22 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: rabbitmq-pv + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: {{ .Values.global.RABBITMQ_PATH }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml new file mode 100644 index 0000000..c677752 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/pvc.yaml @@ -0,0 +1,15 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: rabbitmq-pvc + namespace: imxc + labels: + app: rabbitmq +spec: + storageClassName: rabbitmq + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi + diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/role.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/role.yaml new file mode 100644 index 0000000..9bd029e --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 0000000..74f82f0 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml new file mode 100644 index 0000000..4d14e4e --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/secrets.yaml @@ -0,0 +1,43 @@ +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.auth.existingPasswordSecret) (not .Values.loadDefinition.enabled) }} + {{- if .Values.auth.password }} + rabbitmq-password: {{ .Values.auth.password | b64enc | quote }} + {{- else }} + rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not .Values.auth.existingErlangSecret }} + {{- if .Values.auth.erlangCookie }} + rabbitmq-erlang-cookie: {{ .Values.auth.erlangCookie | b64enc | quote }} + {{- else }} + rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- $extraSecretsPrependReleaseName := .Values.extraSecretsPrependReleaseName }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + {{- if $extraSecretsPrependReleaseName }} + name: {{ $.Release.Name }}-{{ $key }} + {{- else }} + name: {{ $key }} + {{- end }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 0000000..562fde9 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +secrets: + - name: {{ include "rabbitmq.fullname" . }} +{{- end }} + diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 0000000..46b9040 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "rabbitmq.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + {{- with .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml new file mode 100644 index 0000000..45abd14 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,382 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rabbitmq.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.statefulsetLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.statefulsetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "rabbitmq.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + updateStrategy: + type: {{ .Values.updateStrategyType }} + {{- if (eq "OnDelete" .Values.updateStrategyType) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configuration.yaml") . | sha256sum }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + {{- include "rabbitmq.podAnnotations" . | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . 
}} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- if or (.Values.initContainers) (and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.podSecurityContext }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "/bitnami/rabbitmq/mnesia" + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/rabbitmq/mnesia" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext }} + securityContext: {{- toYaml .Values.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + 
value: "{{ template "rabbitmq.fullname" . }}-headless" + - name: K8S_ADDRESS_TYPE + value: {{ .Values.clustering.addressType }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . 
}} + key: rabbitmq-erlang-cookie + {{- if .Values.loadDefinition.enabled }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "yes" + - name: RABBITMQ_SECURE_PASSWORD + value: "no" + {{- else }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: "no" + - name: RABBITMQ_SECURE_PASSWORD + value: "yes" + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + {{- end }} + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . | quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + containerPort: 5672 + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.service.tlsPort }} + {{- end }} + - name: dist + containerPort: 25672 + - name: stats + containerPort: 15672 + - name: epmd + containerPort: 4369 + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: 9419 + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- toYaml .Values.extraContainerPorts | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + - name: stomp + containerPort: 61613 + livenessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) 
| nindent 12 }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + lifecycle: + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + postStart: + exec: + command: + - /bin/bash + - -ec + - | + until rabbitmqctl cluster_status >/dev/null; do + echo "Waiting for cluster readiness..." + sleep 5 + done + rabbitmq-queues rebalance "all" + {{- end }} + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + resources: + requests: + memory: "500Mi" + cpu: "150m" + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: /bitnami/rabbitmq/mnesia + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.persistence.volumes }} + {{- toYaml .Values.persistence.volumes | nindent 8 }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + secret: + secretName: {{ template "rabbitmq.tlsSecretName" . 
}} + items: + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- end }} + - name: configuration + configMap: + name: {{ template "rabbitmq.fullname" . }}-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + {{- if .Values.advancedConfiguration}} + - key: advanced.config + path: advanced.config + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if not (contains "data" (quote .Values.persistence.volumes)) }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" . 
| nindent 10 }} + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} + {{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 0000000..4ed26cc --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }}-headless + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if or (.Values.service.annotationsHeadless) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotationsHeadless}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotationsHeadless "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: amqp + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.portName }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml new file mode 100644 index 0000000..2b4c224 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/svc.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "rabbitmq.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.service.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or (.Values.service.annotations) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotations}} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.tlsPortName }} + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq 
.Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.tlsNodePort)) }} + nodePort: {{ .Values.service.tlsNodePort }} + {{- end }} + {{- end }} + - name: {{ .Values.service.epmdPortName }} + port: 4369 + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.epmdNodePort))) }} + nodePort: {{ .Values.service.epmdNodePort }} + {{- end }} + - name: {{ .Values.service.distPortName }} + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.distNodePort))) }} + nodePort: {{ .Values.service.distNodePort }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.managerPortName }} + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.managerNodePort))) }} + nodePort: {{ .Values.service.managerNodePort }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.metricsPortName }} + port: {{ .Values.service.metricsPort }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.metricsNodePort))) }} + nodePort: {{ .Values.service.metricsNodePort }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml new file mode 100644 index 0000000..b6a6078 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/templates/tls-secrets.yaml @@ -0,0 +1,74 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls" .Values.ingress.hostname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +--- +{{- end }} +{{- end }} +{{- if (include "rabbitmq.createTlsSecret" . )}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "rabbitmq.fullname" . }}-certs + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if or (not .Values.auth.tls.autoGenerated ) (and .Values.auth.tls.caCertificate .Values.auth.tls.serverCertificate .Values.auth.tls.serverKey) }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate| b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 365 }} + {{- $fullname := include "rabbitmq.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "rabbitmq.fullname" . 
}} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.schema.json b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.schema.json new file mode 100644 index 0000000..8ef33ef --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + }, + 
"volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.yaml b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.yaml new file mode 100644 index 0000000..5b74e6c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/rabbitmq/values.yaml @@ -0,0 +1,1151 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +## @section RabitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## @param image.registry RabbitMQ image registry +## @param image.repository RabbitMQ image repository +## @param 
image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: rabbitmq + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} + + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + +## @section Common parameters + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" + +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" + +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param 
diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + password: "eorbahrhkswp" + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + erlangCookie: "pf6t82zTrqY9iaupUmkPOJxPXjmjiNEd" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
+ ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + existingSecret: "" + existingSecretFullChain: false + +## @param logs Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" + +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65536" + +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. 
+## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" + +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. + ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256MB + ## + value: 0.4 + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s rabbitmq_stomp" + +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. 
+## +communityPlugins: "" + +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap rabbitmq_stomp" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: false + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). + ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +args: [] + +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data. 
+## +terminationGracePeriodSeconds: 120 + +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] + +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" + +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] + +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. +## To add more configuration, use `extraConfiguration` or `advancedConfiguration` instead +## +configuration: |- + {{- if not .Values.loadDefinition.enabled -}} + ## Username and password + ## + default_user = {{ .Values.auth.username }} + default_pass = eorbahrhkswp + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + {{ tpl .Values.extraConfiguration . 
}} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + {{- range $index, $server := .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = 9419 + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB + #load_definitions = /app/load_definition.json + +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. 
+## advancedConfiguration: |- +## [{ +## rabbitmq_auth_backend_ldap, +## [{ +## ssl_options, +## [{ +## verify, verify_none +## }, { +## fail_if_no_peer_cert, +## false +## }] +## ]} +## }]. +## +advancedConfiguration: |- + +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.servers List of LDAP servers hostnames + ## + servers: [] + ## @param ldap.port LDAP servers port + ## + port: "389" + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.user_dn_pattern Pattern used to translate the provided username into a value to be used for the LDAP bind + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + user_dn_pattern: cn=${username},dc=example,dc=org + tls: + ## @param ldap.tls.enabled If you enable TLS/SSL you can set advanced options using the `advancedConfiguration` parameter + ## + enabled: false + +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes. +## Example: +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] + +## @param extraSecrets Optionally specify extra secrets to be created by the chart. +## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## Example: +## extraSecrets: +## load-definition: +## load_definition.json: | +## { +## ... +## } +## +extraSecrets: {} +## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with the release name prepended. 
+## +extraSecretsPrependReleaseName: false + +## @section Statefulset parameters + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 + +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady + +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## @param updateStrategyType Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategyType: RollingUpdate + +## @param statefulsetLabels RabbitMQ statefulset labels. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} + +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" + +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} + +## @param nodeSelector Node labels for pod assignment. 
Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: {} + +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroup Group ID for the filesystem used by the containers +## @param podSecurityContext.runAsUser User ID for the service user running the pod +## +podSecurityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## @param containerSecurityContext RabbitMQ containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## RabbitMQ containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + ## Example: + ## limits: + ## cpu: 1000m + ## memory: 2Gi + limits: {} + ## Examples: + ## requests: + ## cpu: 1000m + ## memory: 2Gi + requests: {} + +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} + +## @param 
customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} + +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} + +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] + +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: IfNotPresent +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" + +## @section RBAC parameters + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + 
create: true + +## @section Persistence parameters + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "rabbitmq" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessMode PVC Access Mode for RabbitMQ data volume + ## + accessMode: ReadWriteOnce + + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "rabbitmq-pvc" + + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 5Gi + + ## @param persistence.volumes Additional volumes without creating PVC + ## - name: volume_name + ## emptyDir: {} + ## + volumes: [] + +## @section Exposure parameters + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + # type: NodePort + type: ClusterIP + + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
+ portEnabled: true + + ## @param service.port Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## @param service.portName Amqp service port name + ## + portName: amqp + + ## @param service.tlsPort Amqp TLS port + ## + tlsPort: 5671 + + ## @param service.tlsPortName Amqp TLS service port name + ## + tlsPortName: amqp-ssl + + ## @param service.nodePort Node port override for `amqp` port, if serviceType is `NodePort` or `LoadBalancer` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## e.g: + ## nodePort: 30672 + ## + nodePort: "" + + ## @param service.tlsNodePort Node port override for `amqp-ssl` port, if serviceType is `NodePort` or `LoadBalancer` + ## e.g: + ## tlsNodePort: 30671 + ## + tlsNodePort: "" + + ## @param service.distPort Erlang distribution server port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## @param service.distPortName Erlang distribution service port name + ## + distPortName: dist + + ## @param service.distNodePort Node port override for `dist` port, if serviceType is `NodePort` + ## e.g: + ## distNodePort: 30676 + ## + distNodePort: "" + + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPortEnabled: true + + ## @param service.managerPort RabbitMQ Manager port + ## + managerPort: 15672 + + ## @param service.managerPortName RabbitMQ Manager service port name + ## + managerPortName: http-stats + + ## @param service.managerNodePort Node port override for `http-stats` port, if serviceType `NodePort` + ## e.g: + ## managerNodePort: 30673 + ## + managerNodePort: "" + + ## @param service.metricsPort RabbitMQ Prometheus metrics port + ## + metricsPort: 9419 + + ## @param service.metricsPortName RabbitMQ Prometheus metrics service port name + ## + metricsPortName: metrics + + ## 
@param service.metricsNodePort Node port override for `metrics` port, if serviceType is `NodePort` + ## e.g: + ## metricsNodePort: 30674 + ## + metricsNodePort: "" + + ## @param service.epmdNodePort Node port override for `epmd` port, if serviceType is `NodePort` + ## e.g: + ## epmdNodePort: 30675 + ## + epmdNodePort: "" + + ## @param service.epmdPortName EPMD Discovery service port name + ## + epmdPortName: epmd + + ## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: + - name: stomp + port: 61613 + targetPort: 61613 + #nodePort: 31613 + + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + + ## @param service.annotations Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## @param service.annotationsHeadless Headless Service annotations. 
Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + + ## @param ingress.annotations Ingress annotations + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + ## + annotations: {} + + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting `ingress.certManager=true` + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + + ## @param ingress.certManager Set this to true in order to add the corresponding annotations for cert-manager + ## to generate a TLS secret for the ingress record + ## + certManager: false + + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by 
Helm + ## + selfSigned: false + + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + + ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.additionalRules Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## e.g: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: [] + +## @section Metrics Parameters + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.metricsPort }}" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param 
metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Specify Metric Relabellings to add to the scrape endpoint + ## + relabellings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Used to pass Labels that are required by the installed Prometheus Operator + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrape metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + path: "" + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + 
additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . 
}}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] + +## @section Init Container Parameters + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: 10.10.31.243:5000/cmoa3 # docker.io + repository: bitnami-shell # bitnami/bitnami-shell + tag: 10-debian-10-r175 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## 
Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: + - regcred + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + requests: {} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/.helmignore b/roles/cmoa_install/files/02-base/base/charts/redis/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/Chart.lock b/roles/cmoa_install/files/02-base/base/charts/redis/Chart.lock new file mode 100644 index 0000000..ee0ecb7 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.3.3 +digest: sha256:264db18c8d0962b5c4340840f62306f45fe8d2c1c8999dd41c0f2d62fc93a220 +generated: "2021-01-15T00:05:10.125742807Z" diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/Chart.yaml new file mode 100644 index 0000000..6924d59 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 6.0.10 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +- http://redis.io/ +version: 12.7.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/README.md b/roles/cmoa_install/files/02-base/base/charts/redis/README.md new file mode 100644 index 0000000..3befa8c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/README.md @@ -0,0 +1,707 @@ +# RedisTM Chart packaged by Bitnami + +[RedisTM](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +Disclaimer: REDIS® is a registered trademark of Redis Labs Ltd.Any rights therein are reserved to Redis Labs Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Labs Ltd. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis +``` + +## Introduction + +This chart bootstraps a [RedisTM](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +### Choose between RedisTM Helm Chart and RedisTM Cluster Helm Chart + +You can choose any of the two RedisTM Helm charts for deploying a RedisTM cluster. 
+While [RedisTM Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster using RedisTM Sentinel, the [RedisTM Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a RedisTM Cluster topology with sharding. +The main features of each chart are the following: + +| RedisTM | RedisTM Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![RedisTM Topology](img/redis-topology.png) | ![RedisTM Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys RedisTM on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RedisTM chart and their default values. 
+ +| Parameter | Description | Default | +|:------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | RedisTM password (overrides `password`) | `nil` | +| `image.registry` | RedisTM Image registry | `docker.io` | +| `image.repository` | RedisTM Image name | `bitnami/redis` | +| `image.tag` | RedisTM Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `2` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | RedisTM password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common RedisTM node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `networkPolicy.enabled` | Enable 
NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable security context (both redis master and slave containers) | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container (both redis master and slave containers) | `1001` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Specifies annotations to add to ServiceAccount. 
| `nil` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | RedisTM exporter image registry | `docker.io` | +| `metrics.image.repository` | RedisTM exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | RedisTM exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.serviceMonitor.relabelings` | ServiceMonitor relabelings. Value is evaluated as a template | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | ServiceMonitor metricRelabelings. 
Value is evaluated as a template | `[]` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.hostAliases` | Add deployment host aliases | `[]` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.persistence.volumes` | Additional volumes without creating PVC | `{}` | +| `master.statefulset.labels` | Additional labels for redis master StatefulSet | `{}` | +| `master.statefulset.annotations` | Additional annotations for redis master StatefulSet | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.statefulset.volumeClaimTemplates.labels` | Additional labels for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.podLabels` | Additional labels for RedisTM master pod | {} | +| `master.podAnnotations` | Additional annotations for RedisTM master pod | {} | +| `master.extraEnvVars` | Additional Environment Variables passed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarsSecret` | Additional 
Environment Variables Secret passed to the master's stateful set | `[]` | +| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` | +| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` | +| `podDisruptionBudget.maxUnavailable` | Maximum unavailable | `nil` | +| `redisPort` | RedisTM port (in both master and slaves) | `6379` | +| `tls.enabled` | Enable TLS support for replication traffic | `false` | +| `tls.authClients` | Require clients to authenticate or not | `true` | +| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `nil` | +| `tls.certKeyFilename` | Certificate key filename | `nil` | +| `tls.certCAFilename` | CA Certificate filename | `nil` | +| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) | `nil` | +| `master.command` | RedisTM master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | +| `master.preExecCmds` | Text to inset into the startup script immediately prior to `master.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `master.configmap` | Additional RedisTM configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of RedisTM commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | RedisTM master additional command line flags | [] | +| `master.nodeSelector` | RedisTM master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for RedisTM master pod assignment | [] | +| `master.affinity` | Affinity settings for RedisTM master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | RedisTM master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master 
pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.shareProcessNamespace` | RedisTM Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. 
| `false` | +| `master.priorityClassName` | RedisTM Master pod priorityClassName | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `volumePermissions.securityContext.*` | Security context of the init container | `{}` | +| `volumePermissions.securityContext.runAsUser` | UserID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | 0 | +| `slave.hostAliases` | Add deployment host aliases | `[]` | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.command` | RedisTM slave entrypoint string. The command `redis-server` is executed if this is not provided. 
Note this is prepended with `exec` | `/run.sh` | +| `slave.preExecCmds` | Text to inset into the startup script immediately prior to `slave.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `slave.configmap` | Additional RedisTM configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of RedisTM commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | RedisTM slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis slave pod) | `5` | +| `slave.shareProcessNamespace` | RedisTM slave pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` | +| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` | +| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `slave.persistence.size` | Size of data volume | `8Gi` | +| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `slave.statefulset.labels` | Additional labels for redis slave StatefulSet | `{}` | +| `slave.statefulset.annotations` | Additional annotations for redis slave StatefulSet | `{}` | +| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `slave.statefulset.volumeClaimTemplates.labels` | Additional labels for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis slave StatefulSet volumeClaimTemplates | `{}` | +| `slave.extraEnvVars` | Additional Environment Variables passed to the pod of the slave's stateful set set | `[]` | +| `slave.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the slave's stateful set set | `[]` | +| `masslaveter.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the slave's stateful set | `[]` | +| `slave.podLabels` | Additional labels for RedisTM slave pod | `master.podLabels` | +| `slave.podAnnotations` | Additional annotations for RedisTM slave pod | `master.podAnnotations` | 
+| `slave.schedulerName` | Name of an alternate scheduler | `nil` | +| `slave.resources` | RedisTM slave CPU/Memory resource requests/limits | `{}` | +| `slave.affinity` | Enable node/pod affinity for slaves | {} | +| `slave.tolerations` | Toleration labels for RedisTM slave pod assignment | [] | +| `slave.spreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for RedisTM slave pod | {} | +| `slave.priorityClassName` | RedisTM Slave pod priorityClassName | `nil` | +| `sentinel.enabled` | Enable sentinel containers | `false` | +| `sentinel.usePassword` | Use password for sentinel containers | `true` | +| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` | +| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` | +| `sentinel.quorum` | Quorum for electing a new master | `2` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a RedisTM node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | RedisTM Sentinel port | `26379` | +| `sentinel.configmap` | Additional RedisTM configuration for the sentinel nodes (this value is evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| 
`sentinel.service.redisPort` | Kubernetes Service port for RedisTM read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for RedisTM sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for RedisTM read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for RedisTM sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if RedisTM sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis sentinel container) | `5` | +| `sentinel.resources` | RedisTM sentinel CPU/Memory resource requests/limits | `{}` | +| `sentinel.image.registry` | RedisTM Sentinel Image registry | `docker.io` | +| `sentinel.image.repository` | RedisTM Sentinel Image name | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | RedisTM Sentinel Image tag | `{TAG_NAME}` | +| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `sentinel.extraEnvVars` | Additional Environment Variables passed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the sentinel node stateful set set | `[]` | +| `sentinel.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the sentinel node statefulset | `[]` | +| `sentinel.preExecCmds` | Text to inset into the startup script immediately prior to `sentinel.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/minideb` | +| `sysctlImage.tag` | sysctlImage Init container tag | `buster` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + bitnami/redis +``` + +The above command sets the RedisTM server password to `secretpassword`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults cause pod failure for the RedisTM pod as it attempts to write to the `/bitnami` directory. Consider installing RedisTM with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information. 
+ +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Change RedisTM version + +To modify the RedisTM version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/redis/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### Cluster topologies + +#### Default: Master-Slave + +When installing the chart with `cluster.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master node allowed) and a RedisTM slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed: + + - RedisTM Master service: Points to the master, where read-write operations can be performed + - RedisTM Slave service: Points to the slaves, where only read operations are allowed. + +In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Master-Slave with Sentinel + +When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master allowed) and a RedisTM slave StatefulSet. In this case, the pods will contain an extra container with RedisTM Sentinel. This container will form a cluster of RedisTM Sentinel nodes, which will promote a new master in case the actual one fails. 
In addition to this, only one service is exposed:
+
+ - RedisTM service: Exposes port 6379 for RedisTM read-only operations and port 26379 for accessing RedisTM Sentinel.
+
+For read-only operations, access the service using port 6379. For write operations, it's necessary to access the RedisTM Sentinel cluster and query the current master using the command below (using redis-cli or similar):
+
+```
+SENTINEL get-master-addr-by-name mymaster
+```
+This command will return the address of the current master, which can be accessed from inside the cluster.
+
+In case the current master crashes, the Sentinel containers will elect a new master node.
+
+### Using password file
+To use a password file for RedisTM you need to create a secret containing the password.
+
+> *NOTE*: It is important that the file with the password must be called `redis-password`
+
+And then deploy the Helm Chart using the secret name as parameter:
+
+```console
+usePassword=true
+usePasswordFile=true
+existingSecret=redis-password-file
+sentinel.enabled=true
+metrics.enabled=true
+```
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of the secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+- `tls.certCAFilename`: CA Certificate filename. No defaults.
+
+For example:
+
+First, create the secret with the certificates files:
+
+```console
+kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem
+```
+
+Then, use the following parameters:
+
+```console
+tls.enabled="true"
+tls.certificatesSecret="certificates-tls-secret"
+tls.certFilename="cert.pem"
+tls.certKeyFilename="cert.key"
+tls.certCAFilename="ca.pem"
+```
+
+### Metrics
+
+The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint.
+
+If you have enabled TLS by specifying `tls.enabled=true` you also need to specify TLS options for the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example:
+
+You can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification or provide the following values under `metrics.extraArgs` for TLS client authentication:
+
+```console
+tls-client-key-file
+tls-client-cert-file
+tls-ca-cert-file
+```
+
+### Host Kernel Settings
+
+RedisTM may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
+To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example:
+
+```
+sysctlImage:
+  enabled: true
+  mountHostSys: true
+  command:
+    - /bin/sh
+    - -c
+    - |-
+      install_packages procps
+      sysctl -w net.core.somaxconn=10000
+      echo never > /host-sys/kernel/mm/transparent_hugepage/enabled
+```
+
+Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example:
+
+```yaml
+securityContext:
+  sysctls:
+  - name: net.core.somaxconn
+    value: "10000"
+```
+
+Note that this will not disable transparent huge pages.
+
+## Persistence
+
+By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation.
+
+### Existing PersistentVolumeClaim
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. Install the chart
+
+```bash
+$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/redis
+```
+
+## Backup and restore
+
+### Backup
+
+To perform a backup you will need to connect to one of the nodes and execute:
+
+```bash
+$ kubectl exec -it my-redis-master-0 bash
+
+$ redis-cli
+127.0.0.1:6379> auth your_current_redis_password
+OK
+127.0.0.1:6379> save
+OK
+```
+
+Then you will need to get the created dump file from the redis node:
+
+```bash
+$ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis
+```
+
+### Restore
+
+To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume.
+
+Follow these steps:
+
+- First you will need to set in the `values.yaml` the parameter `appendonly` to `no`, if it is already `no` you can skip this step.
+ +```yaml +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly no + # Disable RDB persistence, AOF persistence already enabled. + save "" +``` + +- Start the new cluster to create the PVCs. + +For example: + +```bash +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +- Now that the PVCs were created, stop it and copy the `dump.rdb` on the persisted data by using a helper pod. + +``` +$ helm delete new-redis + +$ kubectl run --generator=run-pod/v1 -i --rm --tty volpod --overrides=' +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "redisvolpod" + }, + "spec": { + "containers": [{ + "command": [ + "tail", + "-f", + "/dev/null" + ], + "image": "bitnami/minideb", + "name": "mycontainer", + "volumeMounts": [{ + "mountPath": "/mnt", + "name": "redisdata" + }] + }], + "restartPolicy": "Never", + "volumes": [{ + "name": "redisdata", + "persistentVolumeClaim": { + "claimName": "redis-data-new-redis-master-0" + } + }] + } +}' --image="bitnami/minideb" + +$ kubectl cp dump.rdb redisvolpod:/mnt/dump.rdb +$ kubectl delete pod volpod +``` + +- Start again the cluster: + +``` +helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3 +``` + +## NetworkPolicy + +To enable network policy for RedisTM, install +[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, only pods with the generated client label will be +able to connect to RedisTM. 
This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 11.0.0 + +When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then restore the data in this new version. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: + +- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +- Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. 
This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the RedisTM Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + +- Recommended: Create a clone of the RedisTM Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release bitnami/redis --set persistence.existingClaim= + ``` + +- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the RedisTM Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install bitnami/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + +- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) +- `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. 
In case you want to use RedisTM Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes + +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. 
+ +In order to upgrade, delete the RedisTM StatefulSet before upgrading: + +```bash +kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` + +And edit the RedisTM slave (and metrics if enabled) deployment: + +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Upgrading + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +### To 11.0.0 + +When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled in the group. To avoid breaking the compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml` + +### To 9.0.0 + +The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the RedisTM exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter). + +### To 7.0.0 + +In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all. 
+ +This version also allows enabling RedisTM Sentinel containers inside of the RedisTM Pods (feature disabled by default). In case the master crashes, a new RedisTM node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/.helmignore b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/Chart.yaml new file mode 100644 index 0000000..ceb5648 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.3.3 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +type: library +version: 1.3.3 diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/README.md b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/README.md new file mode 100644 index 0000000..461fdc9 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/README.md @@ -0,0 +1,316 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. 
If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|--------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Inpput | +|-------------------------|------------------------------------------------------------|-------------------| +| 
`common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------|----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|-------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. 
See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for RedisTM are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. 
To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..1ff26d5 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
}} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} 
+ topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..d95b569 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,61 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl new file mode 100644 index 0000000..aafde9f --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- 
$pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..622ef50 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,42 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if typeIs "int" .servicePort }} + number: {{ .servicePort }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . 
}} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4931d94 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,127 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- $name = .name -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. 
+ - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum 
$passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* 
+Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..77bcc2b --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..8679ddf --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..bb5ed72 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a786188 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
- subchart - Boolean - Optional. Whether MongoDB is used as subchart or not.
- subchart - Boolean - Optional. Whether MongoDB is used as subchart or not.
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..992bcd3 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..3e2a47c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,72 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis(TM) required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $existingSecret := include "common.redis.values.existingSecret" . -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . 
-}} + {{- $valueKeyRedisPassword := printf "%s%s" $valueKeyPrefix "password" -}} + {{- $valueKeyRedisUsePassword := printf "%s%s" $valueKeyPrefix "usePassword" -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $usePassword := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUsePassword "context" .context) -}} + {{- if eq $usePassword "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Redis Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.redis.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Redis(TM) is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.redis.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..fb2fe60 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. 
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/values.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/ci/default-values.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 0000000..71132f7 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 0000000..7efeda3 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,682 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000 # docker.io + repository: redis # bitnami/redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-sentinel # bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + service: + ## Redis(TM) Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + service: + ## Redis(TM) Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: {} + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + service: + ## Redis(TM) Slave Service type + type: ClusterIP + ## Redis(TM) port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + extraFlags: [] + ## List of Redis(TM) commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, 
e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: 10.10.31.243:5000 # docker.io + repository: redis-exporter # bitnami/redis-exporter + tag: 1.5.3-debian-10-r14 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . 
}}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/NOTES.txt b/roles/cmoa_install/files/02-base/base/charts/redis/templates/NOTES.txt new file mode 100644 index 0000000..a254f58 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/NOTES.txt @@ -0,0 +1,136 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying 
"master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis(TM) service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if and .Values.sentinel.enabled (not .Values.cluster.enabled)}} + +------------------------------------------------------------------------------- + WARNING + + Using redis sentinel without a cluster is not supported. A single pod with + standalone redis has been deployed. + + To deploy redis sentinel, please use the values "cluster.enabled=true" and + "sentinel.enabled=true". + +------------------------------------------------------------------------------- +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}.imxc.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis(TM) Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.imxc.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.imxc.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . 
}}-master.imxc.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace imxc {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis(TM) server: + +1. Run a Redis(TM) pod that you can use as a client: + +{{- if .Values.tls.enabled }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image {{ template "redis.image" . }} --command -- sleep infinity + + Copy your TLS certificates to the pod: + + kubectl cp --namespace imxc /path/to/client.cert {{ template "redis.fullname" . }}-client:/tmp/client.cert + kubectl cp --namespace imxc /path/to/client.key {{ template "redis.fullname" . }}-client:/tmp/client.key + kubectl cp --namespace imxc /path/to/CA.cert {{ template "redis.fullname" . }}-client:/tmp/CA.cert + + Use the following command to attach to the pod: + + kubectl exec --tty -i {{ template "redis.fullname" . }}-client \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --namespace imxc -- bash +{{- else }} + kubectl run --namespace imxc {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash +{{- end }} + +2. Connect using the Redis(TM) CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . 
}} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . 
}}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "redis.fullname" . }}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} + +{{- include "redis.validateValues" . }} \ No newline at end of file diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/_helpers.tpl b/roles/cmoa_install/files/02-base/base/charts/redis/templates/_helpers.tpl new file mode 100644 index 0000000..193105d --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/_helpers.tpl @@ -0,0 +1,421 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "redis.tlsCert" -}} +{{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "redis.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "redis.tlsCACert" -}} +{{- required "Certificate CA filename is required when TLS is enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the DH params file. +*/}} +{{- define "redis.tlsDHParams" -}} +{{- if .Values.tls.dhParamsFilename -}} +{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis(TM) secret. 
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis(TM) password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis(TM) - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.spreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}} +redis: spreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. 
+Usage: +{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "redis.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml new file mode 100644 index 0000000..02411c8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap-scripts.yaml @@ -0,0 +1,393 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-scripts + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + echo "I am master" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + else + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + + # Immediately attempt to connect to the reported master. If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. 
The liveness check will then timeout waiting for the redis + # container to be ready and restart it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "slave" ]]; then + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + else + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + fi + + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . 
| quote }}) + {{- end }} + {{- end }} + + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" + } + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.imxc.svc.{{ .Values.clusterDomain }}" + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + REDIS_MASTER_HOST="$(hostname -i)" + REDIS_MASTER_PORT_NUMBER="{{ .Values.redisPort }}" + else + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + # Immediately attempt to connect to the reported master. 
If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. The liveness check will then timeout waiting for the sentinel + # container to be ready and restart it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + + add_replica() { + if [[ "$1" != "$REDIS_MASTER_HOST" ]]; then + sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $1 {{ .Values.redisPort }}" + fi + } + + {{- if .Values.sentinel.staticID }} + # remove generated known sentinels and replicas + tmp="$(sed -e '/^sentinel known-/d' -e '/^$/d' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + echo "$tmp" > /opt/bitnami/redis-sentinel/etc/sentinel.conf + + for node in $(seq 0 {{ .Values.cluster.slaveCount }}); do + NAME="{{ template "redis.fullname" . 
}}-node-$node" + IP="$(getent hosts "$NAME.$HEADLESS_SERVICE" | awk ' {print $1 }')" + if [[ "$NAME" != "$HOSTNAME" && -n "$IP" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $IP {{ .Values.sentinel.port }} $(host_id "$NAME")" + add_replica "$IP" + fi + done + add_replica "$(hostname -i)" + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} +{{- else }} + start-master.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if .Values.cluster.enabled }} + start-slave.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} + +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap.yaml new file mode 100644 index 0000000..923272c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{- tpl .Values.configmap . | nindent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{- tpl .Values.master.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{- tpl .Values.slave.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{- tpl .Values.sentinel.configmap . 
| nindent 4 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/headless-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..7db7371 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: redis + port: {{ .Values.redisPort }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: redis-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/health-configmap.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..0bbbfb6 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,176 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . 
}} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . 
}} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 0000000..928f9a8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,39 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} +# {{- if .Values.metrics.serviceMonitor.namespace }} +# namespace: {{ .Values.metrics.serviceMonitor.namespace }} +# {{- else }} + namespace: imxc +# {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "metrics" + namespaceSelector: + matchNames: + - imxc +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..4dae3bc --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "metrics" + {{- if .Values.metrics.service.labels -}} + {{- toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{- toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{ if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..ae27ebb --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,74 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/pdb.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/pdb.yaml new file mode 100644 index 0000000..e2ad471 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/pdb.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "redis.fullname" . 
}} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..fba6450 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: imxc + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{- tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/psp.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/psp.yaml new file mode 100644 index 0000000..f3c9390 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/psp.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 0000000..78aa2e6 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,378 @@ +{{- if or (not .Values.cluster.enabled) (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" 
. }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.statefulset.labels }} + {{- toYaml .Values.master.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.master.statefulset.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.annotations | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podLabels }} + {{- toYaml .Values.master.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- toYaml .Values.master.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. 
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.master.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.master.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . 
}}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . 
}} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ include "redis.tplValue" (dict "value" .Values.persistence.existingClaim "context" $) }} + {{- end }} + {{- if .Values.master.persistence.volumes }} + {{- toYaml .Values.master.persistence.volumes | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS is enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.master.persistence.volumes) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- if .Values.master.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.master.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.master.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.master.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 0000000..56ba5f1 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{- toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{- toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{- toYaml . | nindent 4 }} +{{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml new file mode 100644 index 0000000..5d697de --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-node-statefulset.yaml @@ -0,0 +1,494 @@ +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-node + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: node + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: node + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + env: + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.slave.persistence.path }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.sentinel.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ 
.Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if 
and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: {{ template "sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.port | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.dhParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.sentinel.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.sentinel.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ 
.Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS is enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-pv.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-pv.yaml new file mode 100644 index 0000000..adb5416 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-pv.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-master +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-master-0 + namespace: imxc + hostPath: + 
path: {{ .Values.global.IMXC_REDIS_PV_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-0 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-0 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: redis + type: local + name: redis-pv-slave-1 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + capacity: + storage: 8Gi + claimRef: + kind: PersistentVolumeClaim + name: redis-data-redis-slave-1 + namespace: imxc + hostPath: + path: {{ .Values.global.IMXC_REDIS_PV_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: manual + volumeMode: Filesystem + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-role.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-role.yaml new file mode 100644 index 0000000..0d14129 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 
+kind: Role +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{- toYaml .Values.rbac.role.rules | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 0000000..83c87f5 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 0000000..9452003 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . 
}} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 0000000..be0894b --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,384 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . 
}} + role: slave + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . 
}} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName | quote }} + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.slave.shareProcessNamespace }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -c + - /opt/bitnami/scripts/start-scripts/start-slave.sh + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + - 
/health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if and .Values.usePassword (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://localhost:{{ .Values.redisPort }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . 
}} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS is enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . 
}} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 0000000..c1f3ae5 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{- toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: {{- toYaml .Values.slave.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{ if eq .Values.slave.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.slave.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 0000000..3b3458e --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{- toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: {{- toYaml .Values.sentinel.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }} + {{- end }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: redis-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/templates/secret.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/templates/secret.yaml new file mode 100644 index 0000000..c1103d2 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + namespace: imxc + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . 
| b64enc | quote }} +{{- end -}} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/values.schema.json b/roles/cmoa_install/files/02-base/base/charts/redis/values.schema.json new file mode 100644 index 0000000..3188d0c --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + 
"slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "slave/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/roles/cmoa_install/files/02-base/base/charts/redis/values.yaml b/roles/cmoa_install/files/02-base/base/charts/redis/values.yaml new file mode 
100644 index 0000000..fcd8710 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/redis/values.yaml @@ -0,0 +1,932 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: 10.10.31.243:5000/cmoa3 + repository: redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: latest + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +fullnameOverride: redis + +## Cluster settings +## +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +## +sentinel: + enabled: false + #enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + ## + usePassword: true + ## Bitnami Redis(TM) Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + #registry: docker.io + registry: 10.10.31.243:5000 + repository: bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.10-debian-10-r0 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + ## + service: + ## Redis(TM) Sentinel Service type + ## + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + + ## Additional commands to run prior to starting Redis(TM) node with sentinel + ## + preExecCmds: "" + + ## An array to add extra env var to the sentinel node configurations + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + #enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fullname template + ## + name: + ## Add annotations to service account + # annotations: + # iam.gke.io/gcp-service-account: "sa@project.iam.gserviceaccount.com" + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +## +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +## +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "dkagh1234!" 
+## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +## +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. + authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. 
+ ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + ## + service: + ## Redis(TM) Master Service type + ## + type: ClusterIP + # type: NodePort + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + 
externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31379 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + volumes: + # - name: volume_name + # emptyDir: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: '' + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + ## + service: + ## Redis(TM) Slave Service type + ## + type: ClusterIP + #type: NodePort + ## Redis(TM) port + ## + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: 31380 + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + ## + port: 6379 + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + ## + extraFlags: [] + ## List of Redis(TM) commands to disable + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: '' + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false +# enabled: true + + image: + registry: 10.10.31.243:5000 # registry.cloud.intermax:5000 + repository: redis/redis-exporter + #tag: 1.15.1-debian-10-r2 + tag: latest + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + relabelings: [] + + ## MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + metricRelabelings: [] + # - sourceLabels: + # - "__name__" + # targetLabel: "__name__" + # action: replace + # regex: '(.*)' + # replacement: 'example_prefix_$1' + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend 
on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+ rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: '' + service: + type: ClusterIP + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false + ## + securityContext: + runAsUser: 0 + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctlImage: + enabled: false + command: [] + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/roles/cmoa_install/files/02-base/base/charts/zookeeper/.helmignore b/roles/cmoa_install/files/02-base/base/charts/zookeeper/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/zookeeper/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_install/files/02-base/base/charts/zookeeper/Chart.yaml b/roles/cmoa_install/files/02-base/base/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000..c9a2bfb --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/zookeeper/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: zookeeper +version: 0.1.0 diff --git a/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml new file mode 100644 index 0000000..3b23a9e --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/0.config.yaml @@ -0,0 +1,35 @@ +kind: ConfigMap +metadata: + name: zookeeper-config + namespace: imxc +apiVersion: v1 +data: + init.sh: |- + #!/bin/bash + set -e + set -x + [ -d /var/lib/zookeeper/data ] || mkdir /var/lib/zookeeper/data + [ -z "$ID_OFFSET" ] && ID_OFFSET=1 + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET)) + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid + cp -Lur /etc/kafka-configmap/* /etc/kafka/ + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties + zookeeper.properties: |- + tickTime=2000 + dataDir=/var/lib/zookeeper/data + dataLogDir=/var/lib/zookeeper/log + clientPort=2181 + maxClientCnxns=1 + initLimit=5 + syncLimit=2 + server.1=zookeeper-0.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + server.2=zookeeper-1.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + server.3=zookeeper-2.zookeeper-headless.imxc.svc.cluster.local:2888:3888:participant + log4j.properties: |- + log4j.rootLogger=INFO, stdout + 
log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + # Suppress connection log messages, three lines per livenessProbe execution + log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN + log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN diff --git a/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml new file mode 100644 index 0000000..422433a --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/1.service-leader-election.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-headless + namespace: imxc +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + storage: persistent + diff --git a/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml new file mode 100644 index 0000000..9fdcf95 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/2.service-client.yaml @@ -0,0 +1,12 @@ +# the headless service is for PetSet DNS, this one is for clients +apiVersion: v1 +kind: Service +metadata: + name: zookeeper + namespace: imxc +spec: + ports: + - port: 2181 + name: client + selector: + app: zookeeper diff --git a/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml new file mode 100644 index 0000000..2a909f7 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/3.persistent-volume.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: 
PersistentVolume +metadata: + name: zookeeper-cluster-1 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH1 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value1 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-2 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH2 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value2 }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: zookeeper-cluster-3 + labels: + type: local + app: zookeeper +spec: + capacity: + storage: 30Gi + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.global.IMXC_ZOOKEEPER_PATH3 }} + persistentVolumeReclaimPolicy: Retain + storageClassName: zookeeper-storage + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.global.affinity_key }} + operator: In + values: + - {{ .Values.global.affinity_value3 }} diff --git a/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml new file mode 100644 index 0000000..a9e5cb8 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/4.statefulset.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: zookeeper + namespace: imxc +spec: + selector: + matchLabels: + app: zookeeper + 
storage: persistent + serviceName: "zookeeper-headless" + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + template: + metadata: + labels: + app: zookeeper + storage: persistent + annotations: + spec: + terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-initutils:{{ .Values.global.KAFKA_INITUTILS_VERSION }} + command: ['/bin/bash', '/etc/kafka-configmap/init.sh'] + volumeMounts: + - name: configmap + mountPath: /etc/kafka-configmap + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + containers: + - name: zookeeper + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka:{{ .Values.global.KAFKA_VERSION }} + resources: + requests: + cpu: 100m + memory: 200Mi + limits: + cpu: 200m + memory: 500Mi + env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties + command: + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties + lifecycle: + preStop: + exec: + command: ["sh", "-ce", "kill -s TERM 1; while $(kill -0 1 2>/dev/null); do sleep 1; done"] + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election +# readinessProbe: +# exec: +# command: +# - /bin/sh +# - -c +# - '[ "imok" = "$(echo ruok | nc -w 1 -q 1 127.0.0.1 2181)" ]' + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper + volumes: + - name: configmap + configMap: + name: zookeeper-config + - name: config + emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: zookeeper-storage + resources: + requests: + storage: 30Gi diff --git a/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml new file mode 100644 index 
0000000..e08ed54 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/zookeeper/templates/5.pvc.yaml @@ -0,0 +1,50 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-0 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-1 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: imxc + name: data-zookeeper-2 +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 30Gi + storageClassName: zookeeper-storage + selector: + matchLabels: + app: zookeeper \ No newline at end of file diff --git a/roles/cmoa_install/files/02-base/base/charts/zookeeper/values.yaml b/roles/cmoa_install/files/02-base/base/charts/zookeeper/values.yaml new file mode 100644 index 0000000..7b06985 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/charts/zookeeper/values.yaml @@ -0,0 +1,68 @@ +# Default values for zookeeper. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/roles/cmoa_install/files/02-base/base/index.yaml b/roles/cmoa_install/files/02-base/base/index.yaml new file mode 100644 index 0000000..62a41a3 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/index.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +entries: {} +generated: "2019-11-05T09:47:03.285264152+09:00" diff --git a/roles/cmoa_install/files/02-base/base/templates/role.yaml b/roles/cmoa_install/files/02-base/base/templates/role.yaml new file mode 100644 index 0000000..28f0e32 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/templates/role.yaml @@ -0,0 +1,16 @@ +kind: ClusterRoleBinding +{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} +metadata: + name: imxc-cluster-admin-clusterrolebinding +subjects: +- kind: ServiceAccount + name: 
default + namespace: imxc +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/roles/cmoa_install/files/02-base/base/values.yaml b/roles/cmoa_install/files/02-base/base/values.yaml new file mode 100644 index 0000000..e2ad288 --- /dev/null +++ b/roles/cmoa_install/files/02-base/base/values.yaml @@ -0,0 +1,73 @@ +global: + # cluster variables + CLUSTER_ID: cloudmoa + + # default storageClass + DEFAULT_STORAGE_CLASS: exem-local-storage + + # nodeAffinity + affinity_key: cmoa + affinity_value1: worker1 + affinity_value2: worker2 + affinity_value3: worker2 + + # postgres variables + IMXC_POSTGRES_PV_PATH: /media/data/postgres/postgres-data-0 + + #elastic variables + ELASTICSEARCH_PATH1: /media/data/elasticsearch/elasticsearch-data-0 + ELASTICSEARCH_PATH2: /media/data/elasticsearch/elasticsearch-data-1 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + # zookeeper variables + IMXC_ZOOKEEPER_PATH1: /media/data/zookeeper/zookeeper-data-0 + IMXC_ZOOKEEPER_PATH2: /media/data/zookeeper/zookeeper-data-1 + IMXC_ZOOKEEPER_PATH3: /media/data/zookeeper/zookeeper-data-2 + + # kafka variables + IMXC_KAFKA_PV_PATH1: /media/data/kafka/kafka-data-0 + IMXC_KAFKA_PV_PATH2: /media/data/kafka/kafka-data-1 + IMXC_KAFKA_PV_PATH3: /media/data/kafka/kafka-data-2 + KAFKA_BROKER_CONFIG: "{{index .metadata.labels \"failure-domain.beta.kubernetes.io/zone\"}}" + + # cortex variables + IMXC_INGESTER_PV_PATH1: /media/cloudmoa/ingester/ingester-data-1 + IMXC_INGESTER_PV_PATH2: /media/cloudmoa/ingester/ingester-data-2 + IMXC_INGESTER_PV_PATH3: /media/cloudmoa/ingester/ingester-data-3 + + # redis variables + IMXC_REDIS_PV_PATH1: /media/data/redis/redis-data-0 + IMXC_REDIS_PV_PATH2: /media/data/redis/redis-data-1 + IMXC_REDIS_PV_PATH3: /media/data/redis/redis-data-2 + + # rabbitmq variables + RABBITMQ_PATH: /media/data/rabbitmq + + # custom or etc variables + # IMXC_WORKER_NODE_NAME: $IMXC_WORKER_NODE_NAME # deprecated 2021.10.21 + # IMXC_MASTER_IP: 
10.10.30.202 + IMXC_API_SERVER_DNS: imxc-api-service + + METRIC_ANALYZER_MASTER_VERSION: rel0.0.0 + METRIC_ANALYZER_WORKER_VERSION: rel0.0.0 + ELASTICSEARCH_VERSION: v1.0.0 + KAFKA_MANAGER_VERSION: v1.0.0 + KAFKA_INITUTILS_VERSION: v1.0.0 + #KAFKA_VERSION: v1.0.0 + KAFKA_VERSION: v1.0.1 + METRICS_SERVER_VERSION: v1.0.0 + POSTGRES_VERSION: v1.0.0 + CASSANDRA_VERSION: v1.0.0 + RABBITMQ_VERSION: v1.0.0 + CORTEX_VERSION: v1.11.0 #v1.9.0 + #CONSUL_VERSION: 0.7.1 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + rabbitmq: + image: + registry: 10.10.31.243:5000/cmoa3 # {{ .Values.global.IMXC_REGISTRY }} + tag: v1.0.0 # {{ .Values.global.RABBITMQ_VERSION }} diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh new file mode 100755 index 0000000..4079243 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/es-ddl-put.sh @@ -0,0 +1,3085 @@ +#!/bin/bash + +kubectl -n imxc wait --for=condition=ready pod/elasticsearch-1 --timeout=600s + +namespace=$1 +export ES_NODEPORT=`kubectl -n ${namespace} get svc elasticsearch -o jsonpath='{.spec.ports[*].nodePort}'` + +export MASTER_IP=`kubectl get node -o wide | grep control-plane | awk '{print $6}'` + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SECURE=true + +if [ $SECURE = true ] +then +PARAM="-u elastic:elastic --insecure" +PROTO="https" +else +PARAM="" +PROTO="http" +fi + +echo Secure=$SECURE +echo Param=$PARAM +echo Proto=$PROTO + +curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices + +echo "curl ${PARAM} -X GET ${PROTO}://${MASTER_IP}:${ES_NODEPORT}/_cat/indices" + +# kubernetes_cluster_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_info" + ], + "settings": { + "index": { + "number_of_shards": 
'""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "date": { + "type": "long" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + } +}' + +# kubernetes_cluster_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cluster_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cluster_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cluster_history" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "nodes": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_cluster_history": {} + } +}' + +# kubernetes_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { 
+ "name": "kubernetes_info" + }, + "sort.field": "mtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "id": { + "type": "keyword" + }, + "mtime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_info": {} + } +}' + + + +# kubernetes_event_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_event_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_event_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_event_info" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + 
"aliases": { + "kubernetes_event_info": {} + } +}' + + + + +# kubernetes_job_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_job_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_job_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_job_info" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "commandlist": { + "type": "text", + "index": false + }, + "labellist": { + "type": "text", + "index": false + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_job_info": {} + } +}' + + + +# kubernetes_cronjob_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_cronjob_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_cronjob_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_cronjob_info" + } + } + }, + 
"mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "jobname": { + "type": "keyword" + }, + "kind": { + "type": "keyword" + }, + "starttime": { + "type": "long" + }, + "endtime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "lastruntime": { + "type": "long" + }, + "arguments": { + "type": "text", + "index": false + }, + "schedule": { + "type": "keyword" + }, + "active": { + "type": "boolean" + }, + "status": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_cronjob_info": {} + } +}' + + + + +# kubernetes_network_connectivity +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_network_connectivity' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_network_connectivity-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_network_connectivity" + } + } + }, + "mappings": { + "properties": { + "timestamp": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "container": { + "type": "keyword" + }, + "pid": { + "type": "integer" + }, + "peerNode": { + "type": "keyword" + }, + "peerNamespace": { + "type": "keyword" + }, + "peerService": { + "type": "keyword" + }, + "peerPod": { + "type": "keyword" + }, + "peerContainer": { + "type": "keyword" + }, + "peerPid": { + "type": "integer" + } + } + }, + 
"aliases": { + "kubernetes_network_connectivity": {} + } +}' + + + +# sparse_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sparse_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "sparse_log" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "date": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "logpath": { + "type": "text", + "index": false + }, + "contents": { + "type": "text" + }, + "lineNumber": { + "type": "integer" + }, + "probability": { + "type": "float" + }, + "subentityId": { + "type": "keyword" + } + } + }, + "aliases": { + "sparse_log": {} + } +}' + + + +# sparse_model +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sparse_model' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sparse_model" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s" + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "targetType": { + "type": "keyword" + }, + "targetId": { + "type": "keyword" + }, + "modifiedDate": { + "type": "long" + }, + "logPath": { + "type": "keyword" + }, + "savedModel": { + "type": "text", + "index": false + } + } + } +}' + + + +# 
kubernetes_pod_info +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_info' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_pod_info-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_info" + } + } + }, + "mappings": { + "properties": { + "eventType": {"type": "keyword"}, + "cluster": {"type": "keyword"}, + "namespace": {"type": "keyword"}, + "node": {"type": "keyword"}, + "pod": {"type": "keyword"}, + "podUID": {"type": "keyword"}, + "podCreationTimestamp": {"type": "long"}, + "podDeletionTimestamp": {"type": "long"}, + "podDeletionGracePeriod": {"type": "long"}, + "resourceVersion": {"type": "keyword"}, + "ownerKind": {"type": "keyword"}, + "ownerName": {"type": "keyword"}, + "ownerUID": {"type": "keyword"}, + "podPhase": {"type": "keyword"}, + "podIP": {"type": "keyword"}, + "podStartTime": {"type": "long"}, + "podReady": {"type": "boolean"}, + "podContainersReady": {"type": "boolean"}, + "isInitContainer": {"type": "boolean"}, + "containerName": {"type": "keyword"}, + "containerID": {"type": "keyword"}, + "containerImage": {"type": "keyword"}, + "containerImageShort": {"type": "keyword"}, + "containerReady": {"type": "boolean"}, + "containerRestartCount": {"type": "integer"}, + "containerState": {"type": "keyword"}, + "containerStartTime": {"type": "long"}, + "containerMessage": {"type": "keyword"}, + "containerReason": {"type": "keyword"}, + "containerFinishTime": {"type": "long"}, + "containerExitCode": {"type": "integer"}, + "containerLastState": {"type": "keyword"}, + 
"containerLastStartTime": {"type": "long"}, + "containerLastMessage": {"type": "keyword"}, + "containerLastReason": {"type": "keyword"}, + "containerLastFinishTime": {"type": "long"}, + "containerLastExitCode": {"type": "integer"} + } + }, + "aliases": { + "kubernetes_pod_info": {} + } +}' + + + +# kubernetes_pod_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_pod_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "kubernetes_pod_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_pod_history" + } + } + }, + "mappings": { + "properties": { + "deployName": { + "type": "keyword" + }, + "deployType": { + "type": "keyword" + }, + "deployDate": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "podPhase": { + "type": "keyword" + }, + "startTime": { + "type": "keyword" + }, + "endTime": { + "type": "keyword" + }, + "exitCode": { + "type": "integer" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "time": { + "type": "long" + }, + "containerId": { + "type": "keyword" + }, + "containerName": { + "type": "keyword" + }, + "containerPhase": { + "type": "keyword" + }, + "eventAction": { + "type": "keyword" + }, + "containerStartTime": { + "type": "keyword" + }, + "containerEndTime": { + "type": "keyword" + }, + "containerImage": { + "type": "keyword" + }, + "containerImageShort": { + "type": "keyword" + } + } + }, + "aliases": { + "kubernetes_pod_history": {} + } 
+}' + + + + +# metric_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/metric_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/metric_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "metric_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "metric_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "anomaly": { + "type": "boolean" + }, + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "instance": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "metricId": { + "type": "keyword" + }, + "nodeId": { + "type": "keyword" + }, + "score": { + "type": "integer" + }, + "subKey": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "yhatLowerUpper": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "aliases": { + "metric_score": {} + } +}' + + + + +# entity_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/entity_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/entity_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "entity_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + 
"number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "entity_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "contName": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "podId": { + "type": "keyword" + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "nodeId": { + "type": "keyword" + }, + "maxId": { + "type": "keyword" + }, + "maxScore": { + "type": "integer" + }, + "entityScore": { + "type": "integer" + } + } + }, + "aliases": { + "entity_score": {} + } +}' + + +# timeline_score +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/timeline_score' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/timeline_score' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "timeline_score-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "timeline_score" + }, + "sort.field": "unixtime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "clstId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "criticalCount": { + "type": "integer" + }, + "warningCount": { + "type": "integer" + }, + "attentionCount": { + "type": "integer" + }, + "normalCount": { + "type": "integer" + }, + "unixtime": { + "type": "long" + } + } + }, + "aliases": { + "timeline_score": {} + } +}' + + + +# spaninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/spaninfo' -H 'Content-Type: 
application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/spaninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "spaninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "spaninfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "spanId": { + "type": "keyword" + }, + "parentSpanId": { + "type": "keyword" + }, + "protocolType": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "operation": { + "type": "keyword" + }, + "spanKind": { + "type": "keyword" + }, + "component": { + "type": "keyword" + }, + "error": { + "type": "boolean" + }, + "peerAddress": { + "type": "keyword" + }, + "peerHostname": { + "type": "keyword" + }, + "peerIpv4": { + "type": "keyword" + }, + "peerIpv6": { + "type": "keyword" + }, + "peerPort": { + "type": "integer" + }, + "peerService": { + "type": "keyword" + }, + "samplingPriority": { + "type": "keyword" + }, + "httpStatusCode": { + "type": "integer" + }, + "httpUrl": { + "type": "keyword" + }, + "httpMethod": { + "type": "keyword" + }, + "httpApi": { + "type": "keyword" + }, + "dbInstance": { + "type": "keyword" + }, + "dbStatement": { + "type": "keyword" + }, + "dbType": { + "type": "keyword" + }, + "dbUser": { + "type": "keyword" + }, + "messagebusDestination": { + "type": "keyword" + }, + "logs": { + "dynamic": false, + "type": "nested", + 
"properties": { + "fields": { + "dynamic": false, + "type": "nested", + "properties": { + "value": { + "ignore_above": 256, + "type": "keyword" + }, + "key": { + "type": "keyword" + } + } + }, + "timestamp": { + "type": "long" + } + } + } + } + }, + "aliases": { + "spaninfo": {} + } +}' + + + +# sta_podinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_podinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_podinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_podinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "version": { + "type": "keyword" + }, + "components": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + } + } + }, + "aliases": { + "sta_podinfo": {} + } +}' + + +# sta_httpapi
curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpapi' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpapi-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpapi" + } + } + }, + 
"mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "api": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_httpapi": {} + } +}' + + + +# sta_httpsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_httpsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_httpsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_httpsummary" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "pod": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "api": { + "type": "keyword" + }, + "countTotal": { + "type": "integer" + }, + "errorCountTotal": { + "type": "integer" + }, + "timeTotalMicrosec": { + "type": "integer" + }, + "methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_httpsummary": {} + } +}' + + + +# sta_relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_relation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_relation' -H 'Content-Type: application/json' -d 
'{ + "order": 0, + "index_patterns": [ + "sta_relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_relation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "parent": { + "type": "keyword" + }, + "children": { + "type": "nested", + "properties": { + "name": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "sta_relation": {} + } +}' + + + +# sta_externalrelation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_externalrelation" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "timestamp": { + "type": "long" + }, + "externalNamespace": { + "type": "keyword" + }, + "externalService": { + "type": "keyword" + } + } + }, + "aliases": { + "sta_externalrelation": {} + } +}' + + + +# sta_traceinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_traceinfo' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_traceinfo' -H 
'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_traceinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_traceinfo" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "traceId": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "operationName": { + "type": "keyword" + }, + "spanSize": { + "type": "integer" + }, + "relatedServices": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "error": { + "type": "boolean" + } + } + }, + "aliases": { + "sta_traceinfo": {} + } +}' + + + +# sta_tracetrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/sta_tracetrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "sta_tracetrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "0", + "refresh_interval": "1s", + "lifecycle": { + "name": "sta_tracetrend" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "serviceName": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": {"type": "integer"} + } + }, + { + "errors": { + "match": "error*", + "mapping": {"type": "integer"} + } + } + ] + }, + "aliases": { + 
"sta_tracetrend": {} + } +}' + +# script_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/script_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + + + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/script_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "script_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "script_history" + } + } + }, + "mappings": { + "properties": { + "taskId": { + "type": "long" + }, + "scriptName": { + "type": "keyword" + }, + "agentName": { + "type": "keyword" + }, + "targetFile": { + "type": "keyword" + }, + "args": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "validCmd": { + "type": "keyword" + }, + "validVal": { + "type": "keyword" + }, + "valid": { + "type": "boolean" + }, + "validResult": { + "type": "keyword" + }, + "cronExp": { + "type": "keyword" + }, + "createUser": { + "type": "keyword" + }, + "startTime": { + "type": "long" + }, + "endTime": { + "type": "long" + }, + "error": { + "type": "boolean" + }, + "result": { + "type": "keyword" + }, + "order": { + "type": "keyword" + }, + "mtime": { + "type": "keyword" + } + } + }, + "aliases": { + "script_history": {} + } +}' + + +# kubernetes_audit_log +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/kubernetes_audit_log' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + 
"kubernetes_audit_log-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "kubernetes_audit_log" + }, + "sort.field": "stageTimestamp", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "verb": { + "type": "keyword" + }, + "userName": { + "type": "keyword" + }, + "sourceIps": { + "type": "keyword" + }, + "resource": { + "type": "keyword" + }, + "code": { + "type": "keyword" + }, + "requestReceivedTimestamp": { + "type": "long" + }, + "stageTimestamp": { + "type": "long" + }, + "durationTimestamp": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + } + } + }, + "aliases": { + "kubernetes_audit_log": {} + } +}' + +# license_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/license_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/license_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "license_history-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "license_history" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": 
"integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "license_history": {} + } +}' + +# alert_event_history +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/alert_event_history' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/alert_event_history' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "alert_event_history-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "alert_event_history" + } + } + }, + "mappings": { + "properties": { + "alertName": { + "type": "keyword" + }, + "clusterId": { + "type": "keyword" + }, + "data": { + "type": "text", + "index": false + }, + "entityId": { + "type": "keyword" + }, + "entityType": { + "type": "keyword" + }, + "level": { + "type": "keyword" + }, + "metaId": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "startsAt": { + "type": "long" + }, + "threshold": { + "type": "double" + }, + "value": { + "type": "double" + }, + "message": { + "type": "keyword" + }, + "endsAt": { + "type": "long" + }, + "status": { + "type": "keyword" + }, + "hookCollectAt": { + "type": "long" + } + } + }, + "aliases": { + "alert_event_history": {} + } +}' + +# JSPD ilm +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/jspd_ilm' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +}' + +# jspd_lite-activetxn +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-activetxn' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-activetxn-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "cpu_time": { + "type": "integer" + }, + "memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_exec_count": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "active_sql_elapse_time": { + "type": "integer" + }, + "db_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": "keyword" + } + } + }, + "thread_id": { + "type": "long" + }, + "state": { + "type": "short" + }, + "method_id": { + "type": "integer" + }, + "method_seq": { + "type": "integer" + }, + "stack_crc": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-activetxn": {} + } +}' + +# jspd_lite-alert +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-alert' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-alert-*" + ], + "settings": { + "index": { + "number_of_shards": 
'""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "status": { + "type": "short" + }, + "value": { + "type": "integer" + }, + "pid": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-alert": {} + } +}' + +# jspd_lite-e2einfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-e2einfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-e2einfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "root_tid": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "e2e_info_type": { + "type": "short" + }, + "e2e_key": { + "type": "keyword" + }, + "elapse_time": { + "type": "integer" + }, + "dest_url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-e2einfo": {} + } +}' + +# jspd_lite-methodname +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-methodname' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-methodname-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + 
"number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "method_id": { + "type": "integer" + }, + "class_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + }, + "method_name": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-methodname": {} + } +}' + +# jspd_lite-sqldbinfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-sqldbinfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-sqldbinfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "url": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_lite-sqldbinfo": {} + } +}' + +# jspd_lite-txninfo +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txninfo' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txninfo-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + 
"refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "start_time": { + "type": "long" + }, + "end_time": { + "type": "long" + }, + "tid": { + "type": "keyword" + }, + "txn_name": { + "type": "keyword" + }, + "client_ip": { + "type": "keyword" + }, + "exception": { + "type": "short" + }, + "thread_cpu_time": { + "type": "integer" + }, + "thread_memory_usage": { + "type": "integer" + }, + "web_id": { + "type": "integer" + }, + "open_conn": { + "type": "integer" + }, + "close_conn": { + "type": "integer" + }, + "open_stmt": { + "type": "integer" + }, + "close_stmt": { + "type": "integer" + }, + "open_rs": { + "type": "integer" + }, + "close_rs": { + "type": "integer" + }, + "prepare_count": { + "type": "integer" + }, + "sql_execute_count": { + "type": "integer" + }, + "sql_elapse_time": { + "type": "integer" + }, + "sql_elapse_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + }, + "txn_flag": { + "type": "integer" + }, + "http_method": { + "type": "keyword" + }, + "http_status": { + "type": "integer" + }, + "duration": { + "type": "long" + } + } + }, + "aliases": { + "jspd_lite-txninfo": {} + } +}' + +# jspd_lite-txnmethod +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnmethod' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnmethod-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + 
"mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "method_seq": { + "type": "integer" + }, + "method_id": { + "type": "integer" + }, + "calling_method_id": { + "type": "integer" + }, + "stack_crc32": { + "type": "integer" + }, + "calling_stack_crc32": { + "type": "integer" + }, + "elapse_time": { + "type": "integer" + }, + "exec_count": { + "type": "integer" + }, + "error_count": { + "type": "integer" + }, + "cpu_time": { + "type": "integer" + }, + "memory": { + "type": "integer" + }, + "start_time": { + "type": "long" + }, + "method_depth": { + "type": "integer" + }, + "exception": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 32768, + "type": "keyword" + } + } + } + } + }, + "aliases": { + "jspd_lite-txnmethod": {} + } +}' + +# jspd_lite-txnsql +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-txnsql' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-txnsql-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "tid": { + "type": "keyword" + }, + "db_id": { + "type": "integer" + }, + "cursor_id": { + "type": "integer" + }, + "sql_text": { + "type": "text", + "fields": { + "keyword": { + "ignore_above": 102400, + "type": 
"keyword" + } + } + }, + "method_id": { + "type": "integer" + }, + "execute_count": { + "type": "integer" + }, + "elapsed_time": { + "type": "integer" + }, + "elapsed_time_max": { + "type": "integer" + }, + "fetch_count": { + "type": "integer" + }, + "fetch_time": { + "type": "integer" + }, + "fetch_time_max": { + "type": "integer" + }, + "internal_fetch_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-txnsql": {} + } +}' + +# jspd_lite-wasstat +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_lite-wasstat' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_lite-wasstat-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "active_txns": { + "type": "integer" + }, + "sql_exec_count": { + "type": "long" + }, + "sql_prepare_count": { + "type": "long" + }, + "sql_fetch_count": { + "type": "long" + }, + "txn_end_count": { + "type": "long" + }, + "open_file_count": { + "type": "integer" + }, + "close_file_count": { + "type": "integer" + }, + "open_socket_count": { + "type": "integer" + }, + "close_socket_count": { + "type": "integer" + }, + "txn_elapse": { + "type": "long" + }, + "sql_elapse": { + "type": "long" + }, + "txn_elapse_max": { + "type": "long" + }, + "sql_elapse_max": { + "type": "long" + }, + "txn_error_count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_lite-wasstat": {} + } +}' + +# jspd_tta-externalrelation +curl $PARAM -X PUT 
$PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-externalrelation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-externalrelation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "external_namespace": { + "type": "keyword" + }, + "external_service": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-externalrelation": {} + } +}' + +# jspd_tta-relation +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-relation' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-relation-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "from_service": { + "type": "keyword" + }, + "to_service": { + "type": "keyword" + }, + "count": { + "type": "integer" + } + } + }, + "aliases": { + "jspd_tta-relation": {} + } +}' + +# jspd_tta-txnlist +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnlist' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnlist-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + 
}, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + } + } + }, + "aliases": { + "jspd_tta-txnlist": {} + } +}' + +# jspd_tta-txnsummary +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txnsummary' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txnsummary-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + "pod": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "txn_name": { + "type": "keyword" + }, + "req_count": { + "type": "integer" + }, + "resp_count": { + "type": "integer" + }, + "total_duration": { + "type": "long" + }, + "failed": { + "type": "integer" + }, + "http_methods": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "http_statuses": { + "type": "integer", + "fields": { + "integer": { + "type": "integer" + } + } + } + } + }, + "aliases": { + "jspd_tta-txnsummary": {} + } +}' + +# jspd_tta-txntrend +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/jspd_tta-txntrend' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "jspd_tta-txntrend-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "jspd_ilm" + } + } + }, + "mappings": { + "properties": { + "server_uuid": { + "type": "keyword" + }, + "time": { + "type": "long" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "service": { + "type": "keyword" + }, + 
"pod": { + "type": "keyword" + }, + "endTimeGTE": { + "type": "long" + }, + "endTimeLT": { + "type": "long" + } + }, + "dynamic_templates": [ + { + "totals": { + "match": "total*", + "mapping": { + "type": "integer" + } + } + }, + { + "errors": { + "match": "error*", + "mapping": { + "type": "integer" + } + } + } + ] + }, + "aliases": { + "jspd_tta-txntrend": {} + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "5d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl $PARAM -X PUT $PROTO'://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/maximum_metrics' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "maximum_metrics" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "maximum_metrics" + }, + "sort.field": "date", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "kind": { + "type": "keyword" + }, + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "entity": { + "type": "keyword" + }, + "maximum": { + "type": "float" + }, + "date": { + "type": "date", + "format": "yyyy-MM-dd" + } + } + } +}' diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} 
+echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 
0000000..a9c833c --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 
'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh 
b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + 
"type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh new file mode 
100644 index 0000000..b1de084 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + 
+SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt 
b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0-rel332/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk 
{'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: 
application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ 
b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line 
+do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ 
b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 
index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n 
imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + 
dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/elasticsearch/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복재가 잘 됐는지 확인해가며 실행 ** + +1) 
1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복재 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/jspd_menumeta.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. 
+ +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh new file mode 100644 index 0000000..46007cd --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/1_kubernete_event_info_create_dest_source_index.sh @@ -0,0 +1,220 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": 
{ + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "7d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${DEST_INDEX}'-*" + ], + 
"settings": { + "index": { + "number_of_shards": '""${NUM_SHARDS}""', + "number_of_replicas": '""${NUM_REPLICAS}""', + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + } + }, + "analysis": { + "analyzer": { + "my_customer_ngram_analyzer": { + "tokenizer": "my_customer_ngram_tokenizer" + } + }, + "tokenizer": { + "my_customer_ngram_tokenizer": { + "type": "ngram", + "min_gram": "2", + "max_gram": "3" + } + } + } + }, + "mappings": { + "properties": { + "cluster": { + "type": "keyword" + }, + "namespace": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "unixtime": { + "type": "long" + }, + "kind": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "firsttime": { + "type": "long" + }, + "lasttime": { + "type": "long" + }, + "data": { + "type": "text", + "index": false + }, + "id": { + "type": "keyword" + }, + "reason": { + "type": "keyword" + }, + "message": { + "type": "text", + "fields": { + "ngram": { + "type": "text", + "analyzer": "my_customer_ngram_analyzer" + } + } + }, + "count": { + "type": "integer" + }, + "sourceComponent": { + "type": "keyword" + }, + "sourceHost": { + "type": "keyword" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..a9c833c --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/2_kubernete_event_info_reindex_to_dest_from_source.sh @@ -0,0 +1,28 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + 
+SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..abaa743 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/3_kubernete_event_info_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": 
"'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh new file mode 100644 index 0000000..7948b08 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/4_kubernete_event_info_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='kubernetes_event_info' +DEST_INDEX='kubernetes_event_info_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh new file mode 100644 index 0000000..0ddc9ff --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/5_license_history_create_dest_source_index.sh @@ -0,0 +1,184 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +export NUM_SHARDS=2 +export NUM_REPLICAS=1 + 
+SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +# 기존 index 재매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${SOURCE_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + "'${SOURCE_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${SOURCE_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${SOURCE_INDEX}'": {} + } +}' + +# 기존 index 데이터 백업용 index 매핑 +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_ilm/policy/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "policy": { + "phases": { + "delete": { + "min_age": "90d", + "actions": { + "delete": {} + } + } + } + } +}' + +curl -X PUT 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_template/'"${DEST_INDEX}"'' -H 'Content-Type: application/json' -d '{ + "order": 0, + "index_patterns": [ + 
"'${DEST_INDEX}'-*" + ], + "settings": { + "index": { + "number_of_shards": "2", + "number_of_replicas": "1", + "refresh_interval": "1s", + "lifecycle": { + "name": "'${DEST_INDEX}'" + }, + "sort.field": "checkTime", + "sort.order": "desc" + } + }, + "mappings": { + "properties": { + "licenseType": { + "type": "integer" + }, + "expireDate": { + "type": "text" + }, + "targetNodesCount": { + "type": "integer" + }, + "realNodesCount": { + "type": "integer" + }, + "targetPodsCount": { + "type": "integer" + }, + "realPodsCount": { + "type": "integer" + }, + "targetSvcsCount": { + "type": "integer" + }, + "realSvcsCount": { + "type": "integer" + }, + "targetCoreCount": { + "type": "integer" + }, + "realCoreCount": { + "type": "integer" + }, + "allowableRange": { + "type": "integer" + }, + "licenseClusterId": { + "type": "keyword" + }, + "tenantId": { + "type": "keyword" + }, + "checkTime": { + "type": "date", + "format": "epoch_millis" + }, + "checkResult": { + "type": "integer" + } + } + }, + "aliases": { + "'${DEST_INDEX}'": {} + } +}' \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh new file mode 100644 index 0000000..b1de084 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/6_license_history_reindex_to_dest_from_source.sh @@ -0,0 +1,32 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${SOURCE_INDEX}" | awk '{print $3}' | while read line +do + date=${line: 
(-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${source_index_date}'" + }, + "dest": { + "index": "'${dest_index_date}'" + }, + "script": { + "lang": "painless", + "source": "ctx._source.checkTime = Instant.ofEpochSecond(ctx._source.checkTime).toEpochMilli()" + } + }' +done \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh new file mode 100644 index 0000000..e7e0a5c --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/7_license_history_reindex_to_source_from_dest.sh @@ -0,0 +1,30 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${source_index_date} + + curl -X POST 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_reindex?wait_for_completion=false' -H 'Content-Type: application/json' -d '{ + "source": { + "index": "'${dest_index_date}'" + }, + "dest": { + "index": "'${source_index_date}'" + } + }' +done diff --git 
a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh new file mode 100644 index 0000000..3d63181 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/8_license_history_delete_dest_index.sh @@ -0,0 +1,21 @@ +temp=$(kubectl get svc -n imxc -o wide | grep elasticsearch | grep NodePort | awk {'print $5'}) +export ES_NODEPORT=${temp:5:(-4)} +echo $ES_NODEPORT + +export MASTER_IP=$(kubectl get nodes -o wide | grep master | awk {'print $6'}) +echo $MASTER_IP + +SOURCE_INDEX='license_history' +DEST_INDEX='license_history_backup' + +curl -X GET 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/_cat/indices' | grep "${DEST_INDEX}" | awk '{print $3}' | while read line +do + date=${line: (-11)} + source_index_date=${SOURCE_INDEX}${date} + dest_index_date=${DEST_INDEX}${date} + + echo $source_index_date + echo $dest_index_date + + curl -X DELETE 'http://'"${MASTER_IP}"':'"${ES_NODEPORT}"'/'${dest_index_date} +done diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt new file mode 100644 index 0000000..95900be --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/es-reindex-3.2.0/manual.txt @@ -0,0 +1,31 @@ +** 두 인덱스간에 데이터 복제가 잘 됐는지 확인해가며 실행 ** + +1) 1_kubernete_event_info_create_dest_source_index.sh 스크립트 실행 + : 기존 인덱스에 새로운 데이터 타입 매핑작업 + : 기존 인덱스 데이터 백업용 인덱스 매핑작업 + +2) 2_kubernete_event_info_reindex_to_dest_from_source.sh 스크립트 실행 + : 기존 인덱스 데이터 백업용 인덱스로 리인덱싱 + +3) curl -X GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 백업용 인덱스에 기존 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +4) 3_kubernete_event_info_reindex_to_source_from_dest.sh 스크립트 실행 + : 기존 인덱스 삭제 + : 새로 매핑된 기존 인덱스에 백업용 인덱스에 담긴 데이터 다시 리인덱싱 + +5) curl -X
GET http://{IP}:{PORT}/_cat/indices?pretty | grep kubernete_event_info + : 새로 매핑된 인덱스에 백업용 인덱스 데이터가 백업될때까지 대기하기 + : 7번째 칸에 숫자가 일자별 인덱스 숫자와 동일할때까지 대기하기 + +6) 4_kubernete_event_info_delete_dest_index.sh 스크립트 실행 + : 백업용 인덱스 삭제 + +** 아래 스크립트도 위와같은 순서로 진행 ** +** grep license_history 로 변경해서 데이터 복제 확인 ** +5_license_history_create_dest_source_index.sh +6_license_history_reindex_to_dest_from_source.sh +7_license_history_reindex_to_source_from_dest.sh +8_license_history_delete_dest_index.sh + diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql new file mode 100644 index 0000000..c8252dd --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jaeger_menumeta.psql @@ -0,0 +1,21 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. +-- service - active transaction 삭제 +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Active Transaction'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 26; + + +-- service - overview 추가 +-- auth_resource2 +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); + +-- user_permission2 +INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); \ No newline at end of file diff --git
a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql new file mode 100644 index 0000000..4541fb2 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/memu_meta/jspd_menumeta.psql @@ -0,0 +1,22 @@ +-- 이미 존재한다는 (insert 시) 에러메세지나 , 존재하지 않는다는 (delete 시) 에러메세지는 무시하셔도 무방합니다. + +-- service - overview 삭제 +-- user_permission2 +DELETE FROM public.user_permission2 WHERE auth_resource_id = (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview') AND user_id = 'owner'; + +-- menu_meta +DELETE FROM public.menu_meta WHERE id = 22; + +-- auth_resource2 +DELETE FROM public.auth_resource2 WHERE name = 'Overview' AND parent_id = (select id from auth_resource2 where type='menu' and name='Services'); + +-- auth_resource3 +DELETE FROM public.auth_resource3 WHERE name = 'menu|Services|Overview'; + + +-- service - active transaction 추가 +-- auth_resource3 +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +-- menu_meta +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql new file mode 100644 index 0000000..7ed34ad --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.2.0.psql @@ -0,0 +1,803 @@ +UPDATE public.metric_meta2 SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) 
group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)' WHERE id = 'container_memory_usage_by_workload'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + 
port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP' WHERE id = 7; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + 
name: sys + - hostPath: + path: / + name: root +' WHERE id = 4; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources:
+ - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: 
+ fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "INFO" + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 
100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 6; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + 
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' WHERE id = 3; \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql new file mode 100644 index 0000000..6b63e62 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.0.psql @@ -0,0 +1,919 @@ + +-- from diff + +CREATE DATABASE CONFIGS; +CREATE DATABASE keycloak; + +-- cortex 
alert +create table public.alert_rule_config_info ( + config_id varchar not null, + config_data text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config_info +( + config_id varchar not null, + config_data text not null, + config_default text not null, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +create table alert_config +( + id bigint not null, + cluster_id varchar, + resolve_timeout varchar, + receiver varchar, + group_by varchar, + group_wait varchar, + group_interval varchar, + repeat_interval varchar, + routes_level varchar, + routes_continue varchar, + receiver_name varchar, + webhook_url varchar, + send_resolved varchar, + inner_route boolean, + inner_webhook boolean, + in_use boolean default true not null, + created_date timestamp, + modified_date timestamp +); +ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id); +ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id); + + + +alter table tenant_info + add delete_scheduler_date timestamp; + +alter table tenant_info + add tenant_init_clusters varchar(255); + +alter table cloud_user + add dormancy_date timestamp; + +alter table cloud_user + add status varchar(255) default 'use'::character varying not null; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check|Check Script'; + +-- DELETE +-- FROM public.auth_resource3 +-- WHERE name = 'menu|Health Check'; + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +UPDATE public.menu_meta +SET position = 10::integer +WHERE id = 80::bigint; + +UPDATE public.menu_meta +SET position = 99::integer +WHERE id = 90::bigint; + + + +INSERT INTO 
public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: 
${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"', true); + + + +-- JSPD 옵션 값 테이블 +CREATE TABLE public.jspd_prop ( + code_id character varying(255) NOT NULL, + default_value character varying(255) NOT NULL, + description text, + code_type character varying(255), + input_type character varying(255), + input_props character varying(255), + use_yn boolean NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL 
+); + +ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id); + +-- JSPD 옵션 값 설정 LIST table +CREATE TABLE public.jspd_config ( + cluster_id character varying(255) NOT NULL, + namespace character varying(255) NOT NULL, + service character varying(255) NOT NULL, + code_id character varying(255), + code_value character varying(255), + code_type character varying(255), + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL +); +-- ALTER TABLE public.jspd_prop +-- ADD input_type character varying(255); + +-- ALTER TABLE public.jspd_prop +-- ADD input_props character varying(255); + + +ALTER TABLE public.jspd_config + ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id); + +ALTER TABLE ONLY public.jspd_config + ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id); + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by TRX_NAME_TYPE (parameter(1), param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. 
blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop 
values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects 
allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop 
values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); + + +---public.metric_meta2 +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}} + node_memory_SReclaimable_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024'::text WHERE id LIKE 'node#_memory#_used' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100'::text WHERE id LIKE 'host#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = 'sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))'::text WHERE id LIKE 'host#_fs#_total#_by#_mountpoint' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(1- avg by (xm_clst_id) 
(((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100'::text WHERE id LIKE 'cluster#_memory#_usage' ESCAPE '#'; + + +UPDATE public.metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - (node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}} + node_memory_SReclaimable_bytes{xm_entity_type=''Node'', {filter}})) >= 0 or (node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} - node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}})) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} * 100'::text WHERE id LIKE 'node#_memory#_usage' ESCAPE '#'; + +UPDATE public.metric_meta2 SET expr = '(node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})'::text WHERE id LIKE 'host#_memory#_used' ESCAPE '#'; + + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_error_rate', 'Service Pod Transaction Error Rate', 'The number of transaction error rate for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum 
by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.', '2022-02-15 18:08:58.18', '2022-02-15 18:08:58.18'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_rate', 'Service Transaction Error Rate', 'Service Transaction Error Rate', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.', '2022-02-15 14:33:00.118', '2022-02-15 15:40:17.64'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_elapsed_time_avg', 'Service Transaction Elapsed Time (avg)', 'Service Average Elapsed Time', 'sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', 
'2021-11-15 16:09:34.233', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_elapsed_time_avg', 'Service Pod Transaction Elapsed Time (avg)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Pod Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.', '2022-02-15 18:04:55.228', '2022-02-15 18:04:55.228'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_error_count', 'Service Transaction Error Count', 'Service Transaction Error Count', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) ', 'Request', 'Service', NULL, 't', 't', 'SVC:{{$labels.xm_service_name}} Error Request count:{{humanize $value}}%|{threshold}%.', '2021-11-15 16:10:31.352', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_txn_per_sec', 'Service Transaction Count (per Second)', 'Service Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 't', 
'SVC:{{$labels.xm_service_name}} Svc Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2021-11-15 16:11:19.606', '2021-11-15 16:12:21.335'); +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES +('imxc_jspd_pod_txn_per_sec', 'Service Pod Transaction Count (per sec)', 'The number of transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))', 'Request', 'Service', NULL, 't', 'f', 'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-02-15 17:59:39.45', '2022-02-15 17:59:39.45'); + + + +-- Auto-generated SQL script #202202221030 +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_system_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_system_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) 
group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_usage_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100' + WHERE id='container_cpu_user_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)' + WHERE id='container_cpu_user_core_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824' + WHERE 
id='container_fs_limit_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_reads_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_fs_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)' + WHERE 
id='container_fs_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024' + WHERE id='container_fs_writes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_cache_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_max_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824' + WHERE id='container_memory_swap_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) 
(container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE id='container_memory_usage_bytes_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100)' + WHERE id='container_memory_usage_by_workload'; +UPDATE public.metric_meta2 + SET expr='sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024' + WHERE 
id='container_memory_working_set_bytes_by_workload'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_active_txn_per_sec', 'Service Active Transaction Count (per Second)', 'Service Active Transaction Count (per Second)', 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:51:45.946', '2022-03-11 15:51:45.946') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count {{filter}}[1m]))' +WHERE id = 'imxc_jspd_active_txn_per_sec'; + +INSERT INTO public.metric_meta2 (id, meta_name, description, expr, resource_type, entity_type, groupby_keys, in_use, anomaly_score, message, created_date, modified_date) VALUES('imxc_jspd_pod_active_txn_per_sec', 'Service Pod Active Transaction Count (per sec)', 'The number of active transaction counts per second for pod', 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))', 'Request', 'Service', NULL, true, false, 'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.', '2022-03-11 15:53:29.252', '2022-03-11 15:53:29.252') ON +CONFLICT (id) DO +UPDATE +SET + expr = 'sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))' +WHERE id = 'imxc_jspd_pod_active_txn_per_sec'; + + +--public.agent_install_file_info + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes 
+ verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + 
apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume 
+ hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + 
action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node 
+ metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: 
$CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: 
''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql new file mode 100644 index 0000000..e84e9be --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.3.2.psql @@ -0,0 
+1,459 @@
+ UPDATE public.agent_install_file_info SET yaml = '---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+   name: cloudmoa-cluster-role
+ rules:
+ - nonResourceURLs:
+   - "*"
+   verbs:
+   - get
+ - apiGroups:
+   - metrics.k8s.io
+   resources:
+   - pods
+   - nodes
+   verbs:
+   - get
+   - list
+   - watch
+ - apiGroups:
+   - ""
+   resources:
+   - pods
+   verbs:
+   - get
+   - list
+   - watch
+   - update
+ - apiGroups:
+   - ""
+   resources:
+   - services
+   verbs:
+   - get
+   - list
+   - watch
+   - update
+ - apiGroups:
+   - ""
+   resources:
+   - nodes/stats
+   - endpoints
+   - namespaces
+   - events
+   verbs:
+   - get
+   - list
+   - watch
+ - apiGroups:
+   - apps
+   resources:
+   - daemonsets
+   - deployments
+   - deployments/scale
+   - replicasets
+   - replicasets/scale
+   - statefulsets
+   - statefulsets/scale
+   verbs:
+   - get
+   - list
+   - watch
+ - apiGroups:
+   - batch
+   resources:
+   - jobs
+   verbs:
+   - get
+   - list
+   - watch
+   - update
+ - apiGroups:
+   - batch
+   resources:
+   - cronjobs
+   verbs:
+   - get
+   - list
+   - update
+ - apiGroups:
+   - storage.k8s.io
+   resources:
+   - storageclasses
+   verbs:
+   - get
+   - list
+ - apiGroups:
+   - networking.k8s.io
+   resources:
+   - ingresses
+   verbs:
+   - get
+   - list
+ - apiGroups:
+   - extensions
+   resources:
+   - ingresses
+   verbs:
+   - get
+   - list
+ - apiGroups:
+   - policy
+   resources:
+   - podsecuritypolicies
+   verbs:
+   - use
+   resourceNames:
+   - imxc-ps
+ - apiGroups:
+   - certificates.k8s.io
+   resourceNames:
+   - kubernetes.io/kube-apiserver-client-kubelet
+   resources:
+   - signers
+   verbs:
+   - approve
+ - apiGroups:
+   - certificates.k8s.io
+   resourceNames:
+   - kubernetes.io/kubelet-serving
+   resources:
+   - signers
+   verbs:
+   - approve
+ - apiGroups:
+   - ""
+   resources:
+   - nodes
+   verbs:
+   - get
+   - list
+   - watch
+   - proxy
+ - apiGroups:
+   - ""
+   resources:
+   - nodes/log
+   - nodes/metrics
+   - nodes/proxy
+   - nodes/spec
+   - nodes/stats
+   verbs:
+   - ''*''
+ - apiGroups:
+   - ''*''
+   resources:
+   - ''*''
+   verbs:
+   - get
+   - list
+   - watch
+ 
--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent + spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home' WHERE id = 2; + +UPDATE public.agent_install_file_info SET yaml = '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent + spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config + ' WHERE id = 6; \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql new file mode 100644 index 0000000..0d20f2c --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.1.psql @@ -0,0 +1,1379 @@ +CREATE TABLE public.cloud_user_setting ( + 
user_id character varying(255) NOT NULL, + lang character varying(20) DEFAULT 'en', + theme character varying(20) DEFAULT 'dark', + access_token integer DEFAULT 30, + refresh_token integer DEFAULT 10080, + error_msg boolean DEFAULT false, + alert_sound boolean DEFAULT false, + session_persistence boolean DEFAULT true, + gpu_acc_topology boolean DEFAULT true, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE public.cloud_user_setting OWNER TO admin; + +ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +-- 더존(3.3.2) 에서 누락되었던 항목 모두 추가 +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, 
modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, 
code_group, created_date, modified_date)
+values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null);
+
+insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date)
+values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null);
+insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date)
+values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null);
+insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date)
+values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null);
+insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date)
+values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null);
+insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date)
+values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search anomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null);
+insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date)
+values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null);
+insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'imxc-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api-demo', 'api 
server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui-demo', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream Txntrend', 'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +UPDATE public.agent_install_file_info +SET yaml 
= '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name 
+ - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text +WHERE id = 2::bigint; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='topology_idx'; + +UPDATE public.common_setting +SET code_value='spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', + code_group='storageidx' +WHERE code_id='trace_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='event_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='sparse_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='anomaly_idx'; + +UPDATE public.common_setting +SET code_value='alert_event_history', + code_group='storageidx' +WHERE code_id='alert_idx'; + +UPDATE public.common_setting +SET code_group='storageidx' +WHERE code_id='audit_idx'; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists 
+ - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +'::text WHERE id = 4::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: ["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp'::text WHERE id = 5::bigint; + +UPDATE 
public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: 
[__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: 
''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: 
Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP'::text WHERE id = 7::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - 
watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: 
rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: 
/var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home'::text WHERE id = 2::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + 
target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: 
/metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: 
$CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +ALTER TABLE public.alert_rule_config_info ALTER COLUMN config_data TYPE text; + +update alert_rule_config_info +set config_data = '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"' +where config_id = 'rules'; + +ALTER TABLE public.alert_config_info ALTER COLUMN config_data TYPE text, ALTER COLUMN config_default TYPE text; + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: ${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql new file mode 100644 index 0000000..5c5d3c9 --- /dev/null +++ 
b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.2.psql @@ -0,0 +1,8 @@ +-- admin의 owner 속성 추가 +UPDATE cloud_user SET is_tenant_owner = true WHERE user_id = 'admin'; + +-- owner에 대한 종속성을 admin으로 이관기능(필요하면 사용) +UPDATE auth_resource3 SET name = replace(name, 'owner', 'admin') WHERE name like '%|owner|%'; + +-- CLOUD-2305 node_memory_used metric_meta node_memory_SReclaimable_bytes 제거 패치문 반영 +UPDATE metric_meta2 SET expr = '((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024' WHERE id = 'node_memory_used'; diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql new file mode 100644 index 0000000..02f01db --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.3.psql @@ -0,0 +1,361 @@ +-- agent_install_file_info +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + 
kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 3::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when 
total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: 
[__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - 
source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +-- CLOUD-2798 pod_phase_count_by_cluster metric_meta 수정 +UPDATE metric_meta2 SET expr = 'count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))' WHERE id = 'pod_phase_count_by_cluster'; + +-- node_memory_usage 
수정 +update metric_meta2 set expr = 'sum by (xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' where id = 'node_memory_usage'; \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql new file mode 100644 index 0000000..7c582c5 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.6.psql @@ -0,0 +1,360 @@ +-- CLOUD-3473 Memory capacity 조회 쿼리 수정 +update metric_meta2 set description = 'imxc_kubernetes_node_resource_capacity_memory', +expr = 'sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})' where id = 'cluster_memory_capacity'; + +-- module명 metricdata owner_name 와 일치하도록 변경 +update common_setting set code_value ='cmoa-collector' where code_id = 'Cloudmoa Collector'; +update common_setting set code_value ='imxc-api' where code_id = 'Api Server'; +update common_setting set code_value ='imxc-ui' where code_id = 'Ui Server'; +update common_setting set code_value ='cloudmoa-trace-agent' where code_id = 'Trace Agent'; + +-- CLOUD-4795 Contaeird 환경 Container Network 수집 불가 건 확인 +-- 22.10.08 현대카드 대응 건으로 release 3.4.6에 반영 +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + 
description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 
''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + 
target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +'::text WHERE id = 6::bigint; + +UPDATE public.agent_install_file_info SET yaml = '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + 
annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: 
[__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - 
source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config'::text WHERE id = 3::bigint; + diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql new file mode 100644 index 0000000..92344db --- 
/dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.7.psql @@ -0,0 +1,102 @@ +-- CLOUD-4752 node_memory_usage alert 관련 쿼리 수정 +update metric_meta2 set +expr = 'sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100' +where id = 'node_memory_usage'; + +-- CLOUD-6474 node-exporter | GPMAXPROCS 세팅 +-- Auto-generated SQL script #202211241543 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # 
--log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +' + WHERE id=4; \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql new file mode 100644 index 0000000..ea66c68 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_3.4.8.psql @@ -0,0 +1,387 @@ +-- CLOUD-6526 host 관련 쿼리 수정 +-- 수집된 메트릭 시간차로 인해 데이터 표출이 안되는걸 방지하기 위해 rate 5m 추가 +UPDATE metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )' +WHERE id='host_network_io_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )' +WHERE id = 'host_disk_read_write_byte'; + +UPDATE public.metric_meta2 SET expr = 'sum by (instance) ( +(rate(node_disk_reads_completed_total{{filter}}[1m]) + 
rate(node_disk_writes_completed_total{{filter}}[1m])) or +(rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))' +WHERE id = 'host_disk_iops'; + +-- CLOUD-8671 Metric-Agent | 데이터 필터링 설정 추가 +-- Workload > Pod 화면 등에 Docker 런타임 환경의 자원 사용량이 2배 가량으로 보이던 문제 픽스 +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - 
action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + 
insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - 
--config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=3; + +UPDATE public.agent_install_file_info + SET yaml='--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: 
''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: 
(node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +' + WHERE id=6; diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql new file mode 100644 index 
0000000..99d1dbe --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210503.psql @@ -0,0 +1,2844 @@ +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS warning_sign character VARYING(255); +ALTER TABLE alert_rule ADD COLUMN IF NOT EXISTS critical_sign character VARYING(255); + +CREATE TABLE IF NOT EXISTS public.license_policy ( + policy_id character varying(255) NOT NULL, + policy_desc character varying(255), + term_year integer NOT NULL, + term_month integer NOT NULL, + term_day integer NOT NULL, + license_type character varying(255) NOT NULL, + allowable_range character varying(255) NOT NULL, + storage_capacity character varying(255) NOT NULL, + cluster_count character varying(255) NOT NULL, + node_count character varying(255) NOT NULL, + pod_count character varying(255) NOT NULL, + service_count character varying(255) NOT NULL, + core_count character varying(255) NOT NULL, + host_ids character varying(255) NOT NULL, + user_division character varying(255) NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone +); + +ALTER TABLE ONLY public.license_policy + ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + ON CONFLICT (id) + DO + UPDATE SET 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( + label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or + label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587') + WHERE public.metric_meta2.id = 'node_contextswitch_and_filedescriptor'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_core_by_workload'; + + +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) 
(rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_user_core_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System 
(%):{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_system_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) 
(rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()) + WHERE public.metric_meta2.id = 'container_cpu_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_reads_by_workload', 'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} 
CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_reads_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_limit_bytes_by_workload'; + + +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB||{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By 
workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_writes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, 
xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} + CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} + CT:{{$labels.xm_cont_name}}
PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_fs_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_max_usage_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_bytes_by_workload'; + + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 
'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024 *100)', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_usage_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) 
imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_swap_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} 
DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_working_set_bytes_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_memory_cache_by_workload'; + + +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_receive_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, 
owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()) + WHERE public.metric_meta2.id = 'container_network_transmit_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_pod_not_running_by_workload'; + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()) + WHERE public.metric_meta2.id = 'count_container_not_running_by_workload'; 
+ + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + ON CONFLICT (id) + DO + UPDATE SET (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) + = ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])>1','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()) + WHERE public.metric_meta2.id = 'cotainer_restart_count_by_workload'; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - 
effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', '--- + apiVersion: v1 + kind: Service + metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE + spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: 
cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/prom/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + ', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000', null) + WHERE public.agent_install_file_info.id = 4; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + 
global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (3, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container_name] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: 
Recreate + template: + metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.15') + WHERE public.agent_install_file_info.id = 3; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + 
verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + 
apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', '--- + 
apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloudmoa-cluster-role + rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: 
ClusterRoleBinding + metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE + subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE + roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE + spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudmoa-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-agent + spec: + selector: + matchLabels: + app: cloudmoa-agent + template: + metadata: + labels: + app: cloudmoa-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: cloudmoa-agent + image: $DOCKER_REGISTRY_URL/imxc/imxc-agent:$IMAGE_TAG + imagePullPolicy: Always + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: KAFKA_SERVER + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER + value: $COLLTION_SERVER_API_IP:$COLLECTION_SERVER_API_NETTY_PORT + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: spec.nodeName + - name: LOG_LEVEL + value: "DEBUG" + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000', null) + WHERE public.agent_install_file_info.id = 2; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... 
should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '--- + # VERSION : 20190227142300 + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-prometheus-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + prometheus.yml: | + global: + scrape_interval: 15s + # Attach these labels to any time series or alerts when communicating with + # external systems. + external_labels: + monitor: ''5s-monitor'' + + #kafka writer only + no_local_disk_write: true + + # A scrape configuration for running Prometheus on a Kubernetes cluster. 
+ # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + # + + # + # rule_files: + # - "scaling.rules" + + # i suppose my code in the remote kafka write is something wrong ... should append a double quote character at the end of the url + remote_write: + - url: kafka://$COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT/remote_prom?encoding=proto3&compression=snappy + + scrape_configs: + + # Scrape config for nodes (kubelet). + # + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can''t connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: ''kubernetes-kubelet'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + # copied from https://github.com/kayrus/prometheus-kubernetes/blob/master/prometheus-configmap.yaml + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + 
target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + + # Default to scraping over https. If required, just disable this or change to + # `http`. 
+ scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [container] + regex: (.+) + action: keep + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cloudmoa-prometheus + type: ClusterIP + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-prometheus + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-prometheus + spec: + selector: + matchLabels: + app: cloudmoa-prometheus + replicas: 1 + strategy: + type: Recreate + template: + 
metadata: + labels: + app: cloudmoa-prometheus + spec: + containers: + - name: cloudmoa-prometheus + image: $DOCKER_REGISTRY_URL/imxc/metric-agent:$IMAGE_TAG + ports: + - containerPort: 9090 + args: + - --config.file=/etc/prometheus/prometheus.yml + #- --log.level=debug + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: "/prometheus" + name: data + - mountPath: /etc/prometheus/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + restartPolicy: Always + volumes: + - emptyDir: {} + name: data + - name: config-volume + configMap: + name: cloudmoa-prometheus-configuration + ', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000', '1.16') + WHERE public.agent_install_file_info.id = 6; + + +INSERT INTO public.agent_install_file_info (id, name, type, description, yaml, use_yn, created_date, modified_date, version) +VALUES (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. 
+ ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + 
apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + 
app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) +ON CONFLICT (id) +DO + UPDATE SET (id, name, type, description, yaml, use_yn, created_date, modified_date, version) + = (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. + ', '--- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cloudmoa-jaeger-collector-configuration + namespace: $CLOUDMOA_NAMESPACE + data: + strategies.json: | + { + "default_strategy": { + "type": "probabilistic", + "param": 0.1 + } + } + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-deployment + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + jaeger-infra: collector-pod + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/jaeger/jaeger-collector:$IMAGE_TAG + name: jaeger-collector + args: + - --sampling.strategies-file=/etc/jaeger-collector/strategies.json + - --sampling.strategies-reload-interval=60s + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 14267 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 14250 + protocol: TCP + - 
containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: SPAN_STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: $COLLTION_SERVER_KAFKA_IP:$COLLTION_SERVER_KAFKA_INTERFACE_PORT + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-collector" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: API_SERVER_LICENSE + value: $COLLTION_SERVER_API_IP:8080 + + volumeMounts: + - mountPath: /etc/jaeger-collector + name: config-volume + + volumes: + - name: config-volume + configMap: + name: cloudmoa-jaeger-collector-configuration + --- + apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-jaeger-collector + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + jaeger-infra: collector-service + spec: + ports: + - name: jaeger-collector-tchannel + port: 14267 + protocol: TCP + targetPort: 14267 + - name: jaeger-collector-metrics + port: 14269 + targetPort: 14269 + - name: jaeger-collector-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - name: jaeger-collector-zipkin + port: 9411 + targetPort: 9411 + selector: + jaeger-infra: collector-pod + type: ClusterIP + --- + apiVersion: v1 + kind: List + items: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + selector: + matchLabels: + app: jaeger + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5778" + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + 
containers: + - image: $DOCKER_REGISTRY_URL/jaegertracing/jaeger-agent:$IMAGE_TAG + name: jaeger-agent + args: ["--reporter.grpc.host-port", "cloudmoa-jaeger-collector:14250"] + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: LOG_MAXBACKUPS + value: "3" + - name: LOG_MAXSIZE + value: "100" + - name: LOG_STDOUT + value: "TRUE" + - name: LOG_FILENAME + value: "jaeger-agent" + + - apiVersion: v1 + kind: Service + metadata: + name: jaeger-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: jaeger + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app.kubernetes.io/name: jaeger + app.kubernetes.io/component: agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000', null) + WHERE public.agent_install_file_info.id = 7; + +--Menu Resource +--Infrastructure +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (SELECT id 
FROM auth_resource3 WHERE name='menu|Infrastructure|Topology'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Resource Usage'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Resource Usage'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (SELECT id FROM auth_resource3 WHERE name='menu|Infrastructure|Namespace'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Namespace'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Nodes'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES 
(6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Infrastructure|Node Details'); + +--Workloads +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 where name='menu|Workloads|Overview'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Deploy List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Cron Jobs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Workloads|Pods'); + +--Services +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, 
scope_level) +VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where name='menu|Services|Structure'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Structure'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (24, 'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 3 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Services|Detail'); + +--Statistics & Analysis +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Performance Trends'); + 
+INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert Analysis'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Alert History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Anomaly Score'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Job History'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse 
Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Log Viewer'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Event Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Container Life Cycle'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Statistics & Analysis|Service Traces'); + +--Reports +INSERT INTO 
public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (60, 'Reports', '07.Report', 6, NULL, (select id from auth_resource3 where name='menu|Reports'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Documents'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (62, 'Templates', NULL, 1, 'reportSettings', (select id from auth_resource3 where name='menu|Reports|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Reports|Templates'); + +--Dashboards +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Documents'); + +INSERT INTO public.menu_meta (id, 
description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Dashboards|Templates'); + +--Hosts +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (80, 'Hosts', '12.Hosts', 1, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Topology'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where name='menu|Hosts|Overview'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Overview'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (83, 'List', NULL, 2, 'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|List'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) 
+VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Detail'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Hosts|Group'); + +--Settings +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (90, 'Settings', '08.Setting', 10, NULL, (select id from auth_resource3 where name='menu|Settings'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|User & Group'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', 
(select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Host Alerts'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Sparse Logs'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Metric Meta'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (97, 'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|General'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Notification'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (99, 
'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Alias'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|License'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 2 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Settings|Agent Installation'); + +--Health Check +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) +VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check'); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, 
auth_resource3_id, scope_level) +VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0) +ON CONFLICT (id) + DO + UPDATE SET scope_level = 0 + WHERE public.menu_meta.auth_resource3_id = (SELECT id FROM auth_resource3 WHERE name = 'menu|Health Check|Check Script'); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql new file mode 100644 index 0000000..60ad862 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/patch/postgres_patch_R30020210730.psql @@ -0,0 +1,4 @@ +alter table cloud_user alter column log_in_count set default 0; +alter table cloud_user alter column user_lock set default false; + +UPDATE public.metric_meta2 SET meta_name = 'Number of Containers Restart', description = 'Number of Containers Restart (10m)', expr = 'increase(imxc_kubernetes_container_restart_count{{filter}}[10m])', resource_type = 'State', entity_type = 'Workload', groupby_keys = null, in_use = true, anomaly_score = false, message = 'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.', created_date = '2021-06-23 09:30:38.646312', modified_date = '2021-06-23 09:30:38.646312' WHERE id = 'cotainer_restart_count_by_workload'; \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql 
b/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql new file mode 100644 index 0000000..c8deff4 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_ddl.psql @@ -0,0 +1,1667 @@ +CREATE TABLE public.tenant_info ( + id character varying(255) NOT NULL, + name character varying(255) NOT NULL, + in_used boolean DEFAULT true, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + delete_scheduler_date timestamp without time zone NULL, + contract_id bigint NOT NULL, + tenant_init_clusters character varying(255) NULL +); +ALTER TABLE ONLY public.tenant_info ADD CONSTRAINT tenant_info_pkey PRIMARY KEY (id); + +CREATE TABLE public.alert_group ( + id bigint NOT NULL, + created_date timestamp without time zone NOT NULL, + modified_date timestamp without time zone NOT NULL, + cluster_id character varying(255) NOT NULL, + description character varying(255), + name character varying(255) NOT NULL, + type character varying(255) NOT NULL, + namespace character varying(255) DEFAULT 'default'::character varying +); + +ALTER TABLE public.alert_group OWNER TO admin; + +ALTER TABLE ONLY public.alert_group + ADD CONSTRAINT alert_group_pkey PRIMARY KEY (id); + +CREATE UNIQUE INDEX alert_group_name_uindex ON public.alert_group USING btree (name); + +CREATE TABLE public.alert_target ( + id bigint NOT NULL, + created_date timestamp without time zone, + modified_date timestamp without time zone, + cluster_id character varying(255) NOT NULL, + entity_id character varying(255) NOT NULL, + entity_type character varying(255) NOT NULL, + alert_group_id bigint, + namespace character varying(255) +); + +ALTER TABLE public.alert_target OWNER TO admin; + +ALTER TABLE ONLY public.alert_target + ADD CONSTRAINT alert_target_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.alert_target + ADD CONSTRAINT fkjrvj775641ky7s0f82kx3sile FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id); + + + 
-- Scheduled-report template definitions (cron_exp drives generation).
CREATE TABLE public.report_template (
    id bigint NOT NULL,
    created_by character varying(255),
    created_date timestamp without time zone NOT NULL,
    modified_by character varying(255),
    modified_date timestamp without time zone NOT NULL,
    cron_exp character varying(255),
    enable boolean NOT NULL,
    metric_data text,
    template_data text,
    title character varying(255)
);

ALTER TABLE public.report_template OWNER TO admin;

ALTER TABLE ONLY public.report_template
    ADD CONSTRAINT report_template_pkey PRIMARY KEY (id);

-- Fired alert occurrences.
-- NOTE(review): no primary key or index is declared for this table in
-- this DDL, although it has an id column.
CREATE TABLE public.alert_event (
    id bigint NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL,
    alert_name character varying(255) NOT NULL,
    cluster_id character varying(255) NOT NULL,
    data text NOT NULL,
    entity_id character varying(255) NOT NULL,
    entity_type character varying(255) NOT NULL,
    level character varying(255) NOT NULL,
    meta_id character varying(255) NOT NULL,
    namespace character varying(255),
    starts_at bigint NOT NULL,
    threshold character varying(255) NOT NULL,
    value character varying(255) NOT NULL,
    message character varying(255),
    ends_at bigint,
    status character varying(20) NOT NULL,
    hook_collect_at bigint
);

ALTER TABLE public.alert_event OWNER TO admin;

-- Metric metadata keyed by string id; expr holds a templated PromQL-style
-- expression (see the '{{filter}}' usage in the patch scripts).
CREATE TABLE public.metric_meta2 (
    id character varying(255) NOT NULL,
    meta_name character varying(255) NOT NULL,
    description character varying(255) NOT NULL,
    expr text NOT NULL,
    resource_type character varying(255),
    entity_type character varying(255) NOT NULL,
    groupby_keys character varying(255),
    in_use boolean DEFAULT false NOT NULL,
    anomaly_score boolean DEFAULT false NOT NULL,
    message character varying(255) NOT NULL,
    created_date timestamp without time zone DEFAULT now() NOT NULL,
    modified_date timestamp without time zone DEFAULT now() NOT NULL
);

ALTER TABLE public.metric_meta2 OWNER to admin;

ALTER TABLE ONLY public.metric_meta2
    ADD CONSTRAINT metric_meta2_pk PRIMARY KEY (id);

-- Alert rules: warning/critical thresholds bound to a metric meta,
-- an alert group and a target.
CREATE TABLE public.alert_rule (
    id bigint NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL,
    critical float,
    name character varying(255),
    warning float,
    alert_group_id bigint,
    alert_rule_meta_id character varying(255) NOT NULL,
    alert_target_id bigint,
    duration character varying(255) NOT NULL,
    pause boolean DEFAULT false NOT NULL,
    warning_sign character varying(255),
    critical_sign character varying(255)
);

ALTER TABLE public.alert_rule OWNER TO admin;

ALTER TABLE ONLY public.alert_rule
    ADD CONSTRAINT alert_rule_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.alert_rule
    ADD CONSTRAINT fk6b09d1xfyago6wiiqhdiv03s3 FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id);

ALTER TABLE ONLY public.alert_rule
    ADD CONSTRAINT fk8wkucwkgr48hkfg8cvuptww0f FOREIGN KEY (alert_group_id) REFERENCES public.alert_group(id);

ALTER TABLE ONLY public.alert_rule
    ADD CONSTRAINT fkiqaskea7ts0f872u3nx9ne25u FOREIGN KEY (alert_target_id) REFERENCES public.alert_target(id);

-- Static metadata describing built-in alert rules.
CREATE TABLE public.alert_rule_meta (
    id bigint NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL,
    description text NOT NULL,
    expr character varying(255) NOT NULL,
    meta_name character varying(255) NOT NULL,
    target character varying(255) NOT NULL,
    message character varying(255)
);

ALTER TABLE public.alert_rule_meta OWNER TO admin;

ALTER TABLE ONLY public.alert_rule_meta
    ADD CONSTRAINT alert_rule_meta_pkey PRIMARY KEY (id);

-- Shared id sequence (Hibernate-style); the auth_resource2/auth_resource3
-- id columns later in this file default to nextval of it.
CREATE SEQUENCE hibernate_sequence;

-- User groups for permission assignment.
CREATE TABLE public.cloud_group (
    id bigint NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL,
    name character varying(255) NOT NULL,
    description character varying(255),
    created_by character varying(255),
    auth_resource_id bigint
);

ALTER TABLE public.cloud_group OWNER TO admin;

ALTER TABLE ONLY public.cloud_group
    ADD CONSTRAINT cloud_group_pkey PRIMARY KEY (id);

CREATE UNIQUE INDEX cloud_group_name_uindex ON public.cloud_group USING btree (name);

-- User accounts (local or LDAP auth), optionally belonging to a tenant.
CREATE TABLE public.cloud_user (
    user_id character varying(255) NOT NULL,
    email character varying(255),
    is_admin boolean NOT NULL,
    phone character varying(255),
    user_nm character varying(255) NOT NULL,
    user_pw character varying(255) NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL,
    dormancy_date timestamp without time zone NULL,
    company character varying(255),
    department character varying(255),
    last_log_in_date timestamp without time zone,
    "position" character varying(255),
    use_ldap boolean NOT NULL,
    auth_method character varying(255) NOT NULL,
    log_in_count integer default 0 NOT NULL,
    user_lock boolean default false NOT NULL,
    user_lock_date timestamp without time zone,
    tenant_id character varying(120),
    is_tenant_owner boolean default false,
    auth_resource_id bigint,
    status character varying(255) default 'use' NOT NULL
);

ALTER TABLE public.cloud_user OWNER TO admin;

ALTER TABLE ONLY public.cloud_user ADD CONSTRAINT cloud_user_pkey PRIMARY KEY (user_id);

ALTER TABLE ONLY public.cloud_user
    ADD CONSTRAINT cloud_user_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id);

-- UI menu entries; tied to an auth_resource3 row for permission checks
-- (FK added after auth_resource3 is created, near the end of this file).
CREATE TABLE public.menu_meta (
    id bigint NOT NULL,
    description character varying(255),
    icon character varying(255),
    "position" integer NOT NULL,
    url character varying(255),
    auth_resource3_id bigint NOT NULL,
    scope_level int default 0
);

ALTER TABLE public.menu_meta OWNER TO admin;

ALTER TABLE ONLY public.menu_meta
    ADD CONSTRAINT menu_meta_pkey PRIMARY KEY (id);

-- Catalogue of base metrics per provider, keyed by metric name.
CREATE TABLE public.metric_base (
    meta_name character varying(255) NOT NULL,
    provider character varying(255) NOT NULL,
    description character varying(255) NOT NULL,
    resource_type character varying(255),
    diag_type character varying(255),
    entity_type character varying(255) NOT NULL,
    metric_type character varying(255) NOT NULL,
    keys character varying(255),
    created_date timestamp without time zone DEFAULT now() NOT NULL,
    modified_date timestamp without time zone DEFAULT now() NOT NULL
);


ALTER TABLE public.metric_base OWNER TO admin;

ALTER TABLE ONLY public.metric_base
    ADD CONSTRAINT metric_base_pk PRIMARY KEY (meta_name);

-- Generated (static) reports, optionally derived from a report_template.
CREATE TABLE public.report_static (
    id bigint NOT NULL,
    created_by character varying(255),
    created_date timestamp without time zone NOT NULL,
    modified_by character varying(255),
    modified_date timestamp without time zone NOT NULL,
    cron_exp character varying(255),
    metric_data text,
    template_data text,
    title character varying(255),
    type character varying(255),
    report_template_id bigint
);

ALTER TABLE public.report_static OWNER TO admin;

ALTER TABLE ONLY public.report_static
    ADD CONSTRAINT report_static_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.report_static
    ADD CONSTRAINT fk7o821ym9a57lrcfipf928cfpe FOREIGN KEY (report_template_id) REFERENCES public.report_template(id);

-- Many-to-many join between cloud_user and cloud_group.
CREATE TABLE public.user_group (
    user_group_id bigint NOT NULL,
    user_id character varying(255) NOT NULL
);

ALTER TABLE public.user_group OWNER TO admin;

ALTER TABLE ONLY public.user_group
    ADD CONSTRAINT user_group_pkey PRIMARY KEY (user_group_id, user_id);

ALTER TABLE ONLY public.user_group
    ADD CONSTRAINT fkooy6rip2craw6jy3geb5wnix6 FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id);

ALTER TABLE ONLY public.user_group
    ADD CONSTRAINT fkowo8h9te5nwashab3u30docg FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id);

-- Per-user profile image, stored as a large object (oid).
CREATE TABLE public.cloud_user_profile (
    user_id character varying(255) NOT NULL,
    created_date timestamp without time zone,
    modified_date timestamp without time zone,
    profile_image oid
);

ALTER TABLE public.cloud_user_profile OWNER TO admin;

ALTER TABLE ONLY public.cloud_user_profile
    ADD CONSTRAINT cloud_user_profile_pkey PRIMARY KEY (user_id);


-- Generic application settings, keyed by code_id.
CREATE TABLE public.common_setting (
    code_id character varying(255) NOT NULL,
    code_value character varying(255),
    code_desc character varying(255),
    code_auth character varying(255),
    code_group character varying(255),
    created_date timestamp without time zone,
    modified_date timestamp without time zone
);


ALTER TABLE public.common_setting OWNER TO admin;

ALTER TABLE ONLY public.common_setting
    ADD CONSTRAINT common_setting_pkey PRIMARY KEY (code_id);


-- Dashboard preview images, stored as large objects (oid).
CREATE TABLE public.dashboard_thumbnail (
    id bigint NOT NULL,
    thumbnail_image oid,
    created_date timestamp without time zone,
    modified_date timestamp without time zone
);


ALTER TABLE public.dashboard_thumbnail OWNER TO admin;

ALTER TABLE ONLY public.dashboard_thumbnail
    ADD CONSTRAINT dashboard_thumbnail_pkey PRIMARY KEY (id);


-- Outbound notification channels; config is serialized as text.
CREATE TABLE public.notification_channel (
    id bigint NOT NULL,
    created_by character varying(255),
    created_date timestamp without time zone,
    modified_by character varying(255),
    modified_date timestamp without time zone,
    cluster_id character varying(255),
    config text,
    name character varying(255),
    type character varying(255)
);

ALTER TABLE public.notification_channel OWNER TO admin;

ALTER TABLE ONLY public.notification_channel
    ADD CONSTRAINT notification_channel_pkey PRIMARY KEY (id);


-- Binding of alert rules to notification channels.
-- (The alert_rule_id FK is added later in this file, after alert_rule_v2
-- has been created.)
CREATE TABLE public.notification_registry (
    id bigint NOT NULL,
    alert_rule_id bigint NOT NULL,
    notification_channel_id bigint
);

ALTER TABLE public.notification_registry OWNER TO admin;

ALTER TABLE ONLY public.notification_registry
    ADD CONSTRAINT notification_registry_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.notification_registry
    ADD CONSTRAINT fk28xo8snm6fd19i3uap0oba0d1 FOREIGN KEY (notification_channel_id) REFERENCES public.notification_channel(id);


-- Licence-check audit trail (older schema: per-host integer counters).
-- NOTE(review): the constraint name license_check_pkey omits the "_2"
-- suffix of the table name.
CREATE TABLE public.license_check_2 (
    id bigint NOT NULL,
    site_name character varying(255) NOT NULL,
    license_type integer NOT NULL,
    expire_date character varying(255) NOT NULL,
    imxc_host_id integer NOT NULL,
    real_host_id integer NOT NULL,
    imxc_cpu_count integer NOT NULL,
    real_cpu_count integer NOT NULL,
    target_clusters_count integer NOT NULL,
    real_clusters_count integer NOT NULL,
    target_nodes_count integer NOT NULL,
    real_nodes_count integer NOT NULL,
    target_pods_count integer NOT NULL,
    real_pods_count integer NOT NULL,
    target_svcs_count integer NOT NULL,
    real_svcs_count integer NOT NULL,
    target_core_count integer NOT NULL,
    real_core_count integer NOT NULL,
    features_bitmap integer NOT NULL,
    allowable_range integer NOT NULL,
    check_time timestamp without time zone NOT NULL,
    check_result integer NOT NULL
);

ALTER TABLE public.license_check_2
    ADD CONSTRAINT license_check_pkey PRIMARY KEY (id);

CREATE INDEX license_check_check_time_idx ON license_check_2(check_time);


-- Licence violations recorded against license_check_2 entries; resolved_id
-- points at the later check that cleared the violation.
CREATE TABLE public.license_violation (
    id bigint not null,
    check_id bigint not null,
    check_time timestamp without time zone not null,
    violation_item varchar not null,
    allow_time timestamp without time zone not null,
    resolved_id bigint,
    resolved_time timestamp without time zone
);

ALTER TABLE public.license_violation
    ADD CONSTRAINT license_violation_pkey PRIMARY KEY (id);

ALTER TABLE public.license_violation
    ADD CONSTRAINT license_violation_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check_2(id);

ALTER TABLE public.license_violation
    ADD CONSTRAINT license_violation_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check_2(id);

CREATE INDEX license_violation_check_time_idx ON license_violation(check_time);
CREATE INDEX license_violation_resolved_time_idx ON license_violation(resolved_time);


-- Installed licence keys, optionally scoped to a tenant/cluster.
CREATE TABLE public.license_key (
    id bigint NOT NULL,
    license_key text NOT NULL,
    set_time timestamp NOT NULL,
    in_used bool NULL,
    tenant_id varchar NULL,
    cluster_id bigint NULL,
    CONSTRAINT license_key_pkey PRIMARY KEY (id)
);

ALTER TABLE public.license_key ADD CONSTRAINT license_key_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id);

-- Licence-check audit trail (newer schema: per-cluster, host-id lists).
CREATE TABLE public.license_check2 (
    id bigint NOT NULL,
    site_name character varying(255) NOT NULL,
    license_type integer NOT NULL,
    expire_date character varying(255) NOT NULL,
    imxc_host_ids character varying(255),
    real_host_ids character varying(255),
    target_nodes_count integer NOT NULL,
    real_nodes_count integer NOT NULL,
    target_pods_count integer NOT NULL,
    real_pods_count integer NOT NULL,
    target_svcs_count integer NOT NULL,
    real_svcs_count integer NOT NULL,
    target_core_count integer NOT NULL,
    real_core_count integer NOT NULL,
    allowable_range integer NOT NULL,
    license_cluster_id character varying(255),
    check_time timestamp without time zone NOT NULL,
    check_result integer NOT null
);

ALTER TABLE public.license_check2
    ADD CONSTRAINT license_check2_pkey PRIMARY KEY (id);

CREATE INDEX license_check2_time_idx ON license_check2(check_time);

-- Violations for the newer licence checks (adds cluster_id).
CREATE TABLE public.license_violation2 (
    id bigint not null,
    check_id bigint not null,
    check_time timestamp without time zone not null,
    violation_item varchar not null,
    allow_time timestamp without time zone not null,
    resolved_id bigint,
    resolved_time timestamp without time zone,
    cluster_id varchar not null
);

ALTER TABLE public.license_violation2
    ADD CONSTRAINT license_violation2_pkey PRIMARY KEY (id);

ALTER TABLE public.license_violation2
    ADD CONSTRAINT license_violation2_check_id_fk FOREIGN KEY (check_id) REFERENCES public.license_check2(id);

ALTER TABLE public.license_violation2
    ADD CONSTRAINT license_violation2_resolved_id_fk FOREIGN KEY (resolved_id) REFERENCES public.license_check2(id);

CREATE INDEX
license_violation2_check_time_idx ON license_violation2(check_time);
CREATE INDEX license_violation2_resolved_time_idx ON license_violation2(resolved_time);

-- Per-cluster licence keys (newer schema).
CREATE TABLE public.license_key2 (
    id bigint not null,
    license_key text not null,
    set_time timestamp without time zone not null,
    cluster_id varchar,
    license_used bool not null
);

ALTER TABLE public.license_key2
    ADD CONSTRAINT license_key2_pkey PRIMARY KEY (id);

-- Licence policy definitions; quota columns are strings so 'unlimited'
-- can be expressed alongside numeric values (see the seed INSERT in the
-- postgres_insert_dml script).
create table public.license_policy (
    policy_id character varying(255) NOT NULL,
    policy_desc character varying(255),
    term_year integer NOT NULL,
    term_month integer NOT NULL,
    term_day integer NOT NULL,
    license_type character varying(255) NOT NULL,
    allowable_range character varying(255) NOT NULL,
    storage_capacity character varying(255) NOT NULL,
    cluster_count character varying(255) NOT NULL,
    node_count character varying(255) NOT NULL,
    pod_count character varying(255) NOT NULL,
    service_count character varying(255) NOT NULL,
    core_count character varying(255) NOT NULL,
    host_ids character varying(255) NOT NULL,
    user_division character varying(255) NOT NULL,
    created_date timestamp without time zone,
    modified_date timestamp without time zone
);

ALTER TABLE ONLY public.license_policy
    ADD CONSTRAINT license_policy_pkey PRIMARY KEY (policy_id);


-- Auth resource tree (v2); ids default to the shared hibernate_sequence.
CREATE TABLE public.auth_resource2 (
    id bigint NOT NULL default nextval('hibernate_sequence'),
    access_type integer NOT NULL,
    name character varying(255) NOT NULL,
    parent_id bigint,
    type character varying(255) NOT NULL
);

ALTER TABLE public.auth_resource2 OWNER TO admin;

ALTER TABLE ONLY public.auth_resource2
    ADD CONSTRAINT auth_resource2_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.auth_resource2
    ADD CONSTRAINT resource_name_uniq UNIQUE (name, type, parent_id);

-- The auth_resource2 FKs below are intentionally left disabled; the
-- auth_resource3 FKs near the end of this file are used instead.
--ALTER TABLE ONLY public.auth_resource2
--  ADD CONSTRAINT auth_resource2_auth_resource_id_fk FOREIGN KEY (parent_id) REFERENCES public.auth_resource2(id);
--
--ALTER TABLE ONLY public.menu_meta
--  ADD CONSTRAINT fk2tqq4ybf6w130fsaejhrsnw5s FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id);

-- Per-user permission entries.
CREATE TABLE public.user_permission2 (
    id bigint NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL,
    all_child boolean NOT NULL,
    permission integer NOT NULL,
    auth_resource_id bigint,
    user_id character varying(255)
);

ALTER TABLE public.user_permission2 OWNER TO admin;

ALTER TABLE ONLY public.user_permission2
    ADD CONSTRAINT user_permission2_pkey PRIMARY KEY (id);

-- ALTER TABLE ONLY public.user_permission2
--     ADD CONSTRAINT user_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id);

ALTER TABLE ONLY public.user_permission2
    ADD CONSTRAINT user_permission2_user_id_fk FOREIGN KEY (user_id) REFERENCES public.cloud_user(user_id);


-- Per-group permission entries.
CREATE TABLE public.group_permission2 (
    id bigint NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL,
    all_child boolean NOT NULL,
    permission integer NOT NULL,
    auth_resource_id bigint,
    user_group_id bigint
);

ALTER TABLE public.group_permission2 OWNER TO admin;

ALTER TABLE ONLY public.group_permission2
    ADD CONSTRAINT group_permission2_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.group_permission2
    ADD CONSTRAINT group_permission2_user_group_id_fk FOREIGN KEY (user_group_id) REFERENCES public.cloud_group(id);

-- ALTER TABLE ONLY public.group_permission2
--     ADD CONSTRAINT group_permission2_auth_resource2_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id);

-- Named groups of auth resources.
CREATE TABLE public.resource_group2 (
    id int8 NOT NULL,
    created_date timestamp NOT NULL,
    modified_date timestamp NOT NULL,
    "name" varchar(255) NOT NULL,
    description varchar(255) NULL,
    CONSTRAINT resource_group2_pkey PRIMARY KEY (id)
--  CONSTRAINT resource_group2_fk1 FOREIGN KEY (id) REFERENCES auth_resource2(id)
);

ALTER TABLE public.resource_group2 OWNER TO "admin";
GRANT ALL ON TABLE public.resource_group2 TO "admin";

-- Membership of auth resources in resource groups.
CREATE TABLE public.resource_member2 (
    resource_group_id int8 NOT NULL,
    auth_resource_id int8 NOT NULL,
    CONSTRAINT resource_member2_pkey PRIMARY KEY (resource_group_id, auth_resource_id),
    CONSTRAINT resource_member2_fkey1 FOREIGN KEY (resource_group_id) REFERENCES resource_group2(id)
--  CONSTRAINT resource_member2_fkey2 FOREIGN KEY (auth_resource_id) REFERENCES auth_resource2(id)
);

ALTER TABLE public.resource_member2 OWNER TO "admin";
GRANT ALL ON TABLE public.resource_member2 TO "admin";

-- User dashboards; layout is serialized as text.
CREATE TABLE public.dashboard2 (
    id bigint NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL,
    layout text NOT NULL,
    title character varying(255) NOT NULL,
    auth_resource_id bigint NOT NULL,
    created_by character varying(255) NOT NULL,
    modified_by character varying(255) NOT NULL,
    description character varying(255),
    share boolean DEFAULT false
);

ALTER TABLE public.dashboard2 OWNER TO admin;

ALTER TABLE ONLY public.dashboard2
    ADD CONSTRAINT dashboard2_pkey PRIMARY KEY (id);

-- ALTER TABLE ONLY public.dashboard2
--     ADD CONSTRAINT dashboard_resource_fk FOREIGN KEY (auth_resource_id) REFERENCES public.auth_resource2(id);

-- Per-node log rotation / backup settings.
CREATE TABLE public.log_management (
    cluster_id varchar NOT NULL,
    node_id varchar NOT NULL,
    log_rotate_dir varchar,
    log_rotate_count integer,
    log_rotate_size integer,
    log_rotate_management boolean NOT NULL,
    back_up_dir varchar,
    back_up_period integer,
    back_up_dir_size integer,
    back_up_management boolean NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone
);

alter table public.log_management add constraint log_management_pkey primary key (cluster_id, node_id);

-- Per-service sampling settings (sampling_type / sampling_param).
CREATE TABLE public.sampling_setting (
    service_id bigint NOT NULL,
    service_name character varying(255),
    sampling_type character varying(255),
    sampling_param character varying(255),
    cluster varchar,
    namespace varchar,
    cluster_id bigint
);
ALTER TABLE public.sampling_setting OWNER TO admin;

ALTER TABLE ONLY public.sampling_setting
    ADD CONSTRAINT sampling_setting_pkey PRIMARY KEY (service_id);

-- Per-operation sampling overrides for a service.
CREATE TABLE public.operation_setting (
    id bigint NOT NULL,
    service_id bigint NOT NULL,
    sampling_type character varying(255),
    sampling_param character varying(255),
    operation_name character varying(255)
);

ALTER TABLE public.operation_setting OWNER TO admin;

ALTER TABLE ONLY public.operation_setting
    ADD CONSTRAINT operation_setting_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.operation_setting
    ADD CONSTRAINT operation_setting_fkey FOREIGN KEY (service_id) REFERENCES public.sampling_setting(service_id);

-- Per-cluster parameter settings.
CREATE TABLE public.cluster_setting (
    cluster_id bigint NOT NULL,
    param_type character varying(255),
    param_value character varying(255),
    cluster_name varchar,
    name character varying(255)
);

ALTER TABLE ONLY public.cluster_setting
    ADD CONSTRAINT cluster_setting_pkey PRIMARY KEY (cluster_id);

-- Per-user alias codes.
CREATE TABLE public.alias_code (
    user_id varchar NOT NULL,
    id varchar NOT NULL,
    name varchar,
    type varchar,
    use_yn varchar,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone
);

ALTER TABLE ONLY public.alias_code add constraint alias_code_pkey primary key (user_id, id);

-- Sparse-log detection settings per monitored target.
CREATE TABLE public.sparse_log_info (
    id varchar NOT NULL,
    cluster_id varchar,
    namespace varchar,
    target_type varchar,
    target_id varchar,
    log_path varchar,
    created_date timestamp,
    modified_date timestamp,
    threshold float4,
    PRIMARY KEY ("id")
);

-- Per-user saved view state; json_data is a serialized payload.
CREATE TABLE public.view_code (
    user_id varchar NOT NULL,
    view_id varchar NOT NULL,
    json_data text,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone
);

ALTER TABLE ONLY public.view_code add constraint view_code_pkey primary key (user_id, view_id);

-- Entities excluded from monitoring (black list).
CREATE TABLE public.entity_black_list (
    entity_type varchar not null,
    entity_name varchar not null,
    cluster_id varchar not null,
    namespace varchar,
    black_list bool not null,
    workload varchar(255) not null
);

ALTER TABLE public.entity_black_list
    ADD CONSTRAINT entity_black_list_pkey PRIMARY KEY (entity_type, entity_name, cluster_id, namespace);

-- Check-script registrations; cron_exp drives scheduling.
CREATE TABLE public.script_setting (
    id bigint NOT NULL,
    name character varying(255),
    agent_list character varying(255),
    file_path character varying(255),
    args character varying(255),
    valid_cmd character varying(255),
    valid_val character varying(255),
    cron_exp character varying(255),
    create_user character varying(255),
    mtime BIGINT,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone
);

ALTER TABLE ONLY public.script_setting
    ADD CONSTRAINT script_setting_pkey PRIMARY KEY (id);

-- Agent install manifests; yaml column holds the manifest text.
CREATE TABLE public.agent_install_file_info (
    id bigint NOT NULL,
    name character varying(255) NOT NULL,
    type character varying(255) NOT NULL,
    description text,
    version character varying(255),
    yaml text,
    use_yn boolean NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL
);

ALTER TABLE ONLY public.agent_install_file_info ADD CONSTRAINT agent_install_file_info_pkey PRIMARY KEY (id);

-- Auth resource registry (v3): flat unique names, optionally
-- tenant-scoped; ids default to the shared hibernate_sequence.
create table auth_resource3(
    id bigint NOT NULL default nextval('hibernate_sequence'),
    name character varying(255) NOT NULL,
    is_deleted boolean not null,
    tenant_id character varying(255)
);

ALTER TABLE public.auth_resource3 owner to admin;

ALTER TABLE ONLY public.auth_resource3
    ADD CONSTRAINT auth_resource3_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.auth_resource3
    ADD CONSTRAINT auth_resource3_name_uniq UNIQUE (name);

-- Membership of v3 auth resources in resource groups.
create table resource_member3(
    resource_group_id bigint not null,
    auth_resource3_id bigint not null
);

ALTER TABLE resource_member3 owner to admin;

ALTER TABLE ONLY public.resource_member3
    ADD CONSTRAINT resource_member3_pkey primary key (resource_group_id, auth_resource3_id);

ALTER TABLE ONLY public.auth_resource3 ADD CONSTRAINT auth_resource3_tenant_id_fk FOREIGN KEY (tenant_id) REFERENCES public.tenant_info(id);

-- Point the permission/menu/dashboard/user/group resource FKs at
-- auth_resource3 (the corresponding auth_resource2 FKs earlier in this
-- file are commented out).
ALTER TABLE public.menu_meta ADD CONSTRAINT menu_meta_auth_resource3_fk FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id);
ALTER TABLE public.user_permission2 ADD CONSTRAINT user_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);
ALTER TABLE public.resource_group2 ADD CONSTRAINT resource_group2_auth_resource3_fk1 FOREIGN KEY (id) REFERENCES auth_resource3(id);
ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey1 FOREIGN KEY (resource_group_id) REFERENCES public.resource_group2(id);
ALTER TABLE public.resource_member3 ADD CONSTRAINT resource_member3_auth_resource3_fkey2 FOREIGN KEY (auth_resource3_id) REFERENCES auth_resource3(id);
ALTER TABLE public.group_permission2 ADD CONSTRAINT group_permission2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);
ALTER TABLE public.dashboard2 ADD CONSTRAINT dashboard2_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);
ALTER TABLE public.cloud_user ADD CONSTRAINT cloud_user_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);
ALTER TABLE public.cloud_group ADD CONSTRAINT cloud_group_auth_resource3_fk FOREIGN KEY (auth_resource_id) REFERENCES auth_resource3(id);

-- Auxiliary databases (unquoted CONFIGS folds to "configs").
-- NOTE(review): CREATE DATABASE cannot run inside a transaction block, so
-- this script must not be wrapped in one (plain psql execution is fine).
CREATE DATABASE CONFIGS;
CREATE DATABASE keycloak;

-- JSPD option definitions: defaults plus input-widget metadata.
CREATE TABLE public.jspd_prop (
    code_id character varying(255) NOT NULL,
    default_value character varying(255) NOT NULL,
    description text,
    code_type character varying(255),
    input_type character varying(255),
    input_props character varying(255),
    use_yn boolean NOT NULL,
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL
);

ALTER TABLE ONLY public.jspd_prop ADD CONSTRAINT jspd_prop_pkey PRIMARY KEY (code_id);

-- JSPD option values set per cluster/namespace/service.
-- NOTE(review): code_id is declared nullable but participates in the
-- primary key below; PostgreSQL will force it NOT NULL.
CREATE TABLE public.jspd_config (
    cluster_id character varying(255) NOT NULL,
    namespace character varying(255) NOT NULL,
    service character varying(255) NOT NULL,
    code_id character varying(255),
    code_value character varying(255),
    code_type character varying(255),
    created_date timestamp without time zone NOT NULL,
    modified_date timestamp without time zone NOT NULL
);
-- ALTER TABLE public.jspd_prop
--     ADD input_type character varying(255);

-- ALTER TABLE public.jspd_prop
--     ADD input_props character varying(255);


ALTER TABLE public.jspd_config
    ADD CONSTRAINT jspd_config_pkey PRIMARY KEY (cluster_id, namespace, service, code_id);

ALTER TABLE ONLY public.jspd_config
    ADD CONSTRAINT jspd_config_code_id_fk FOREIGN KEY (code_id) REFERENCES public.jspd_prop(code_id);

-- noti server tables: v2 alert group/target/rule with destination and
-- created_by tracking.
CREATE TABLE public.alert_group_v2 (
    id bigint NOT NULL,
    created_date timestamp NOT NULL,
    modified_date timestamp NOT NULL,
    cluster_id varchar(255) NOT NULL,
    description varchar(255),
    name varchar(255) NOT NULL,
    type varchar(255) NOT NULL,
    namespace varchar(255) default 'default'::character varying,
    destination varchar(255) NOT NULL,
    created_by varchar(255) NOT NULL
);

CREATE TABLE public.alert_target_v2 (
    id bigint NOT NULL,
    created_date timestamp,
    modified_date timestamp,
    cluster_id varchar(255) NOT NULL,
    entity_id varchar(255) NOT NULL,
    entity_type varchar(255) NOT NULL,
    alert_group_id bigint,
    namespace varchar(255)
);

CREATE TABLE public.alert_rule_v2 (
    id bigint NOT NULL,
    created_date timestamp NOT NULL,
    modified_date timestamp NOT NULL,
    critical double precision,
    name varchar(255),
    warning double precision,
    alert_group_id bigint,
    alert_rule_meta_id varchar(255) NOT NULL,
    alert_target_id bigint,
    duration varchar(255) NOT NULL,
    pause boolean DEFAULT false NOT NULL,
    critical_sign varchar(255),
    warning_sign varchar(255),
    destination varchar(255),
    created_by varchar(255)
);

ALTER TABLE public.alert_group_v2 ADD CONSTRAINT alert_group_v2_id_pk PRIMARY KEY (id);
ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_id_pk PRIMARY KEY (id);
ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_id_pk PRIMARY KEY (id);

ALTER TABLE public.alert_target_v2 ADD CONSTRAINT alert_target_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id);
ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_group_id_fk FOREIGN KEY (alert_group_id) REFERENCES public.alert_group_v2(id);
ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_rule_meta_id_fk FOREIGN KEY (alert_rule_meta_id) REFERENCES public.metric_meta2(id);
ALTER TABLE public.alert_rule_v2 ADD CONSTRAINT alert_rule_v2_alert_target_id_fk FOREIGN KEY (alert_target_id) REFERENCES public.alert_target_v2(id);
-- notification_registry rows reference the v2 alert rules.
ALTER TABLE ONLY public.notification_registry
    ADD CONSTRAINT fk4lljw4fnija73tm3lthjg90rx FOREIGN KEY (alert_rule_id) REFERENCES public.alert_rule_v2(id);


-- cortex alert: alertmanager configuration storage.
create table public.alert_rule_config_info (
    config_id varchar not null,
    config_data text not null,
    in_use boolean default true not null,
    created_date timestamp,
    modified_date timestamp
);

create table alert_config_info
(
    config_id varchar not null,
    config_data text not null,
    config_default text not null,
    in_use boolean default true not null,
    created_date timestamp,
    modified_date timestamp
);

-- Flattened alertmanager routing/receiver settings per cluster.
create table alert_config
(
    id varchar not null,
    cluster_id varchar,
    resolve_timeout varchar,
    receiver varchar,
    group_by varchar,
    group_wait varchar,
    group_interval varchar,
    repeat_interval varchar,
    routes_level varchar,
    routes_continue varchar,
    receiver_name varchar,
    webhook_url varchar,
    send_resolved varchar,
    inner_route boolean,
    inner_webhook boolean,
    in_use boolean default true not null,
    created_date timestamp,
    modified_date timestamp
);

ALTER TABLE public.alert_rule_config_info ADD CONSTRAINT alert_rule_config_info_config_id_pk PRIMARY KEY (config_id);
ALTER TABLE public.alert_config_info ADD CONSTRAINT alert_config_info_config_id_pk PRIMARY KEY (config_id);
ALTER TABLE public.alert_config ADD CONSTRAINT alert_config_id_pk PRIMARY KEY (id);

-- Per-user UI/session preferences.
CREATE TABLE public.cloud_user_setting (
    user_id character varying(255) NOT NULL,
    lang character varying(20) DEFAULT 'en',
    theme character varying(20) DEFAULT 'dark',
    access_token integer DEFAULT 30,
    refresh_token integer DEFAULT 10080,
    error_msg boolean DEFAULT false,
    alert_sound boolean DEFAULT false,
    session_persistence boolean DEFAULT true,
    gpu_acc_topology boolean DEFAULT true,
    created_date timestamp without time zone,
    modified_date timestamp without time zone
);

ALTER TABLE public.cloud_user_setting OWNER TO admin;

ALTER TABLE ONLY public.cloud_user_setting ADD CONSTRAINT cloud_user_setting_pkey PRIMARY KEY (user_id);

-------- 2022-05-31 KubeInfo flatting table --------
-- Snapshots of flattened Kubernetes objects keyed by capture time
-- (kube_flatting_time) plus object identity. The *_base tables include
-- cluster_id in their primary key; the child list tables (cronjob_active,
-- endpoint addresses/ports) key only on (time, kind, uid, row_index).
CREATE TABLE cmoa_configmap_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    kind_status varchar(50),
    metadata_resourceVersion text,
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_namespace text,
    binaryData text,
    data text,
    immutable text,
    create_time timestamp default now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);
-----------------------
CREATE TABLE cmoa_cronjob_active(
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    status_active_apiVersion text,
    status_active_fieldPath text,
    status_active_kind text,
    status_active_name text,
    status_active_namespace text,
    status_active_resourceVersion text,
    status_active_uid text,
    create_time timestamp default now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);
----------------------------
CREATE TABLE cmoa_cronjob_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceVersion text,
    spec_failedJobsHistoryLimit text,
    spec_schedule text,
    spec_successfulJobsHistoryLimit text,
    spec_suspend text,
    status_lastScheduleTime text,
    create_time timestamp default now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);
----------------------------
CREATE TABLE cmoa_daemonset_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceVersion text,
    status_currentNumberScheduled text,
    status_desiredNumberScheduled text,
    status_numberAvailable text,
    status_numberMisscheduled text,
    status_numberReady text,
    status_numberUnavailable text,
    create_time timestamp default now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);
----------------------------
CREATE TABLE cmoa_deployment_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_name text,
    metadata_namespace text,
    metadata_resourceVersion text,
    spec_replicas text,
    spec_template_spec_containers_image text,
    status_availableReplicas text,
    status_readyReplicas text,
    status_replicas text,
    status_unavailableReplicas text,
    create_time timestamp default now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);
----------------------------
CREATE TABLE cmoa_endpoint_addresses(
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    subset_addresses_ip text,
    subset_addresses_hostname text,
    subset_addresses_nodeName text,
    subset_addresses_targetRef text,
    create_time timestamp default now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);
----------------------------
CREATE TABLE cmoa_endpoint_base(
    kube_flatting_time bigint,
    cluster_id varchar(255),
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    kind_status varchar(50),
    metadata_name text,
    metadata_resourceVersion text,
    metadata_annotations text,
    metadata_creationTimestamp varchar(25),
    metadata_labels text,
    metadata_namespace text,
    create_time timestamp default now(),
    PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index)
);
----------------------------
CREATE TABLE cmoa_endpoint_notreadyaddresses(
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
    metadata_name text,
    subset_notreadyaddresses_ip text,
    subset_notreadyaddresses_hostname text,
    subset_notreadyaddresses_nodename text,
    subset_notreadyaddresses_targetref text,
    create_time timestamp default now(),
    PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index)
);
----------------------------
CREATE TABLE cmoa_endpoint_ports(
    kube_flatting_time bigint,
    kind varchar(30),
    metadata_uid varchar(40),
    row_index int,
metadata_name text, + subset_ports_port text, + subset_ports_appprotocol text, + subset_ports_name text, + subset_ports_protocol text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_event_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + action text, + count text, + eventtime text, + firsttimestamp text, + involvedobject_apiversion text, + involvedobject_fieldpath text, + involvedobject_kind text, + involvedobject_name text, + involvedobject_namespace text, + involvedobject_resourceversion text, + involvedobject_uid text, + lasttimestamp text, + message text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + reason text, + related_apiversion text, + related_fieldpath text, + related_kind text, + related_name text, + related_namespace text, + related_resourceversion text, + related_uid text, + series_count text, + series_lastobservedtime text, + series_state text, + source_component text, + source_host text, + type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_resourceversion text, + spec_backofflimit text, + spec_completions text, + spec_parallelism text, + 
status_active text, + status_completiontime text, + status_failed text, + status_starttime text, + status_succeeded text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_job_template ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_template_spec_containers_args text, + spec_template_spec_containers_command text, + spec_template_spec_containers_image text, + spec_template_spec_containers_name text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_namespace_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_namespace text, + spec_finalizers text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_selflink text, + metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_annotations text, + spec_podcidr text, + spec_taints text, + status_capacity_cpu text, + status_capacity_ephemeral_storage text, + status_capacity_hugepages_1gi text, + status_capacity_hugepages_2mi text, + status_capacity_memory text, + status_capacity_pods text, + status_allocatable_cpu text, + status_allocatable_ephemeral_storage text, + 
status_allocatable_hugepages_1gi text, + status_allocatable_hugepages_2mi text, + status_allocatable_memory text, + status_allocatable_pods text, + status_addresses text, + status_daemonendpoints_kubeletendpoint_port text, + status_nodeinfo_machineid text, + status_nodeinfo_systemuuid text, + status_nodeinfo_bootid text, + status_nodeinfo_kernelversion text, + status_nodeinfo_osimage text, + status_nodeinfo_containerruntimeversion text, + status_nodeinfo_kubeletversion text, + status_nodeinfo_kubeproxyversion text, + status_nodeinfo_operatingsystem text, + status_nodeinfo_architecture text, + status_volumesinuse text, + status_volumesattached text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_condition ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lastheartbeattime text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_node_image ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_images_names text, + status_images_sizebytes text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolume_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace 
text, + metadata_resourceversion text, + spec_accessmodes text, + spec_awselasticblockstore text, + spec_azuredisk text, + spec_azurefile text, + spec_capacity text, + spec_claimref_apiversion text, + spec_claimref_fieldpath text, + spec_claimref_kind text, + spec_claimref_name text, + spec_claimref_namespace text, + spec_claimref_resourceversion text, + spec_claimref_uid text, + spec_csi text, + spec_fc text, + spec_flexvolume text, + spec_flocker text, + spec_gcepersistentdisk text, + spec_glusterfs text, + spec_hostpath text, + spec_iscsi text, + spec_local text, + spec_nfs text, + spec_persistentvolumereclaimpolicy text, + spec_photonpersistentdisk text, + spec_portworxvolume text, + spec_quobyte text, + spec_rbd text, + spec_scaleio text, + spec_storageclassname text, + spec_storageos text, + spec_volumemode text, + spec_vspherevolume text, + status_message text, + status_phase text, + status_reason text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_persistentvolumeclaim_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_accessmodes text, + spec_storageclassname text, + spec_volumemode text, + spec_volumename text, + status_accessmodes text, + status_capacity text, + status_phase text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + kind_status varchar(50), + metadata_uid varchar(40), + row_index int, + metadata_name text, + metadata_selflink text, 
+ metadata_resourceversion text, + metadata_creationtimestamp varchar(25), + metadata_generatename text, + metadata_namespace text, + metadata_deletiontimestamp text, + metadata_deletiongraceperiodseconds text, + metadata_labels text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + spec_hostnetwork text, + spec_priorityclassname text, + spec_enableservicelinks text, + spec_priority text, + spec_schedulername text, + spec_hostpid text, + spec_nodename text, + spec_serviceaccount text, + spec_serviceaccountname text, + spec_dnspolicy text, + spec_terminationgraceperiodseconds text, + spec_restartpolicy text, + spec_securitycontext text, + spec_nodeselector_kubernetes_io_hostname text, + spec_tolerations text, + status_phase text, + status_hostip text, + status_podip text, + status_starttime text, + status_qosclass text, + status_reason text, + status_message text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_conditions ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_conditions_type text, + status_conditions_status text, + status_conditions_lasttransitiontime text, + status_conditions_reason text, + status_conditions_message text, + status_conditions_lastprobetime text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containerstatuses ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + status_containerstatuses_name text, + status_containerstatuses_ready text, + status_containerstatuses_restartcount text, + status_containerstatuses_image text, + status_containerstatuses_imageid text, 
+ status_containerstatuses_containerid text, + status_containerstatuses_state_terminated_exitcode text, + status_containerstatuses_state_terminated_reason text, + status_containerstatuses_state_terminated_startedat text, + status_containerstatuses_state_terminated_finishedat text, + status_containerstatuses_state_terminated_containerid text, + status_containerstatuses_state_waiting_reason text, + status_containerstatuses_state_waiting_message text, + status_containerstatuses_state_running_startedat text, + status_containerstatuses_laststate_terminated_exitcode text, + status_containerstatuses_laststate_terminated_reason text, + status_containerstatuses_laststate_terminated_startedat text, + status_containerstatuses_laststate_terminated_finishedat text, + status_containerstatuses_laststate_terminated_containerid text, + status_containerstatuses_laststate_waiting_reason text, + status_containerstatuses_laststate_waiting_message text, + status_containerstatuses_laststate_running_startedat text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_containers ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_containers_name text, + spec_containers_image text, + spec_containers_env text, + spec_containers_resources_limits_cpu text, + spec_containers_resources_limits_memory text, + spec_containers_resources_requests_cpu text, + spec_containers_resources_requests_memory text, + spec_containers_volumemounts text, + spec_containers_securitycontext_privileged text, + spec_containers_command text, + spec_containers_ports text, + spec_containers_args text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_pod_volume ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid 
varchar(40), + row_index int, + metadata_name text, + spec_volumes_name text, + spec_volumes_hostpath text, + spec_volumes_secret text, + spec_volumes_configmap text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_replicaset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_availablereplicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_hard text, + spec_scopes text, + status_hard text, + status_used text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_resourcequota_scopeselector ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_scopeselector_matchexpressions_operator text, + spec_scopeselector_matchexpressions_scopename text, + spec_scopeselector_matchexpressions_values text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE 
cmoa_service_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_name text, + metadata_resourceversion text, + metadata_ownerreferences text, + metadata_ownerReferences_kind varchar(30), + metadata_ownerReferences_uid varchar(40), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_deletiongraceperiodseconds text, + metadata_deletiontimestamp text, + metadata_labels text, + metadata_namespace text, + spec_clusterip text, + spec_externalips text, + spec_selector text, + spec_type text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_service_ports ( + kube_flatting_time bigint, + kind varchar(30), + metadata_uid varchar(40), + row_index int, + metadata_name text, + spec_ports_appprotocol text, + spec_ports_name text, + spec_ports_nodeport text, + spec_ports_port text, + spec_ports_protocol text, + spec_ports_targetport text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, kind, metadata_uid, row_index) +); +---------------------------- +CREATE TABLE cmoa_statefulset_base ( + kube_flatting_time bigint, + cluster_id varchar(255), + kind varchar(30), + metadata_uid varchar(40), + row_index int, + kind_status varchar(50), + metadata_annotations text, + metadata_creationtimestamp varchar(25), + metadata_labels text, + metadata_name text, + metadata_namespace text, + metadata_resourceversion text, + spec_replicas text, + status_readyreplicas text, + status_replicas text, + create_time timestamp default now(), + PRIMARY KEY (kube_flatting_time, cluster_id, kind, metadata_uid, row_index) +); + +CREATE TABLE public.api_error_history ( + id int8 NOT NULL, + api_msg varchar(255) NULL, + code varchar(255) NULL, + "exception" varchar(255) NULL, + http_error varchar(255) NULL, + http_status 
int4 NULL, + occureence_time varchar(255) NULL, + params varchar(255) NULL, + "path" varchar(255) NULL, + "type" varchar(255) NULL, + CONSTRAINT api_error_history_pkey PRIMARY KEY (id) +); + +CREATE TABLE public.metric_score ( + clst_id varchar(255) NOT NULL, + entity_id varchar(255) NOT NULL, + entity_type varchar(255) NOT NULL, + metric_id varchar(255) NOT NULL, + sub_key varchar(255) NOT NULL, + unixtime int4 NOT NULL, + anomaly bool NOT NULL, + cont_name varchar(255) NULL, + "instance" varchar(255) NULL, + "namespace" varchar(255) NULL, + node_id varchar(255) NULL, + pod_id varchar(255) NULL, + score int4 NOT NULL, + yhat_lower_upper json NULL, + CONSTRAINT metric_score_pkey PRIMARY KEY (clst_id, entity_id, entity_type, metric_id, sub_key, unixtime) +); + + +CREATE TABLE public.tenant_info_auth_resources ( + tenant_info_id varchar(255) NOT NULL, + auth_resources_id int8 NOT NULL, + CONSTRAINT tenant_info_auth_resources_pkey PRIMARY KEY (tenant_info_id, auth_resources_id), + CONSTRAINT uk_7s6l8e2c8gli4js43c4xoifcl UNIQUE (auth_resources_id) +); + + +-- public.tenant_info_auth_resources foreign keys + +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkkecsc13ydhwg8u05aumkqbnx1 FOREIGN KEY (tenant_info_id) REFERENCES public.tenant_info(id); +ALTER TABLE public.tenant_info_auth_resources ADD CONSTRAINT fkpvvec4ju3hsma6s1rtgvr4mf6 FOREIGN KEY (auth_resources_id) REFERENCES public.auth_resource3(id); \ No newline at end of file diff --git a/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql b/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql new file mode 100644 index 0000000..e6335f3 --- /dev/null +++ b/roles/cmoa_install/files/03-ddl-dml/postgres/postgres_insert_dml.psql @@ -0,0 +1,2380 @@ +INSERT INTO public.tenant_info (id, name, in_used, created_date, modified_date, contract_id) VALUES ('DEFAULT_TENANT', 'admin', true, now(), now(), 0); + +INSERT INTO public.auth_resource2 (id, access_type, name, parent_id, 
type) VALUES (-1, 4, 'null', NULL, 'null'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Infrastructure', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Workloads', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Services', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Diagnosis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Statistics & Analysis', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Reports', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Settings', -1 , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Hosts', -1, 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Dashboards', -1 , 'menu'); +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Health Check', -1, 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Namespace', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Nodes', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Node Details', (select id 
from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Usage', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Persistent Volume', (select id from auth_resource2 where type='menu' and name='Infrastructure') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Pods', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Cron Jobs', (select id from auth_resource2 where type='menu' and name='Workloads') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Deploy List', (select id from auth_resource2 where type='menu' and name='Workloads'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Structure', (select id from auth_resource2 where type='menu' and name='Services') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id from auth_resource2 
where type='menu' and name='Services'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Services'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Diagnosis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Troubleshooting', (select id from auth_resource2 where type='menu' and name='Diagnosis') , 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Performance Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alert History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Anomaly Score', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Job History', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Log Viewer', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Event Logs', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, 
parent_id, type) VALUES (4, 'Alert Analysis', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Container Life Cycle', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Traces', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); +-- INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Resource Used Trends', (select id from auth_resource2 where type='menu' and name='Statistics & Analysis'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Reports'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'User & Group', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alerts', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Sparse Logs', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'General', (select id from auth_resource2 where type='menu' and name='Settings') , 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Metric Meta', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO 
public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Notification', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Host Alerts', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'License', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Agent', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Alias', (select id from auth_resource2 where type='menu' and name='Settings'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Documents', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Templates', (select id from auth_resource2 where type='menu' and name='Dashboards'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Topology', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Overview', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'List', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Detail', (select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Group', 
(select id from auth_resource2 where type='menu' and name='Hosts'), 'menu'); + +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'CloudMOA - Nodes Resource', NULL, 'dashboard'); +INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES (4, 'Service Detail', NULL, 'dashboard'); + +--INSERT INTO public.auth_resource2 (access_type, name, parent_id, type) VALUES(4, 'Check Script', (select id from auth_resource2 where type='menu' and name='Health Check'), 'menu'); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards', false, null); +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Namespace', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, 
tenant_id) VALUES ('menu|Infrastructure|Nodes', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Node Details', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Infrastructure|Resource Usage', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Pods', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Jobs', false, null); +-- NSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Cron Jobs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Workloads|Deploy List', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Structure', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Services|Active Transaction', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Diagnosis|Anomaly Score', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Performance Trends', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert History', false, null); +INSERT INTO public.auth_resource3 (name, 
is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Anomaly Score', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Job History', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Log Viewer', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Event Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Alert Analysis', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Container Life Cycle', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Statistics & Analysis|Service Traces', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Reports|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|User & Group', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Sparse Logs', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|General', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Metric Meta', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Notification', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES 
('menu|Settings|Host Alerts', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|License', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Alias', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Settings|Agent Installation', false, NULL); + + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Documents', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Dashboards|Templates', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Topology', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Overview', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|List', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Detail', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Hosts|Group', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('menu|Health Check|Check Script', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('userGroup|admin|default', false, null); + +--INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('user|admin|owner', false, 'DEFAULT_TENANT'); + +INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, 
auth_resource_id) VALUES ('admin', NULL, true, NULL, 'admin', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin')); +INSERT INTO public.cloud_group (id, created_date, modified_date, name, description) VALUES ((select id from auth_resource3 where name='userGroup|admin|default'), now(), now(), 'default', '기본그룹정의'); + +--INSERT INTO public.cloud_user (user_id, email, is_admin, phone, user_nm, user_pw, created_date, modified_date, company, department, last_log_in_date, "position", use_ldap, auth_method, log_in_count, user_lock, user_lock_date, tenant_id, is_tenant_owner, auth_resource_id) VALUES ('owner', NULL, false, NULL, 'owner', '$2a$10$a0XPdet9RCL8uF8ZVZ2Yzu4y0po5RWCesyB0e03MhrTIfG.0Y6xfS',now() , now() , NULL , NULL , NULL , NULL, false, 'default', 0, false, null, 'DEFAULT_TENANT', true, (select id from auth_resource3 where name='user|admin|owner')); + +INSERT INTO public.cloud_user_setting +(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +VALUES('admin', null, null, null, null, false, false, true, true, now(), null); + +--INSERT INTO public.cloud_user_setting +--(user_id, lang, theme, access_token, refresh_token, error_msg, alert_sound, session_persistence, gpu_acc_topology, created_date, modified_date) +--VALUES('owner', null, null, null, null, false, false, true, true, now(), null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|CloudMOA - Nodes Resource', false, null); +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('dashboard|admin|Service Detail', false, null); + +INSERT INTO public.auth_resource3 (name, is_deleted, tenant_id) VALUES ('cluster|cloudmoa', false, 'DEFAULT_TENANT'); + +INSERT INTO public.menu_meta (id, description, 
icon, "position", url, auth_resource3_id, scope_level) VALUES (0, 'Infrastructure', '01.Infrastructure', 0, NULL, (select id from auth_resource3 where name='menu|Infrastructure'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (1, 'Topology', NULL, 0, 'topologyInfra', (select id from auth_resource3 where name='menu|Infrastructure|Topology'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (2, 'Overview', NULL, 1, 'overViewInfra', (select id from auth_resource3 where name='menu|Infrastructure|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (3, 'Resource Usage', NULL, 2, 'resourceUsageInfra', (select id from auth_resource3 where name='menu|Infrastructure|Resource Usage'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (4, 'Namespace', NULL, 3, 'namespaceInfra', (select id from auth_resource3 where name='menu|Infrastructure|Namespace'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (5, 'Nodes', NULL, 4, 'nodesInfra', (select id from auth_resource3 where name='menu|Infrastructure|Nodes'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (6, 'Node Details', NULL, 5, 'nodeDetailInfra', (select id from auth_resource3 where name='menu|Infrastructure|Node Details'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (10, 'Workloads', '02.Workload', 1, NULL, (select id from auth_resource3 where name='menu|Workloads'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (11, 'Overview', NULL, 0, 'overviewWorkloads', (select id from auth_resource3 
where name='menu|Workloads|Overview'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (12, 'deployList', NULL, 1, 'deployListWorkloads', (select id from auth_resource3 where name='menu|Workloads|Deploy List'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (17, 'Jobs', NULL, 6, 'jobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Jobs'), 2); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (18, 'Cron Jobs', NULL, 7, 'cronJobsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Cron Jobs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (19, 'Pods', NULL, 8, 'podsWorkloads', (select id from auth_resource3 where name='menu|Workloads|Pods'), 3); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (20, 'Services', '03.Service', 2, NULL, (select id from auth_resource3 where name='menu|Services'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (21, 'DataCenter Service', NULL, 0, 'topologyServices', (select id from auth_resource3 where name='menu|Services|Topology'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (22, 'ServiceOverview', NULL, 1, 'overviewServices', (select id from auth_resource3 where name='menu|Services|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (23, 'Cluster Service', NULL, 2, 'detailServices', (select id from auth_resource3 where name='menu|Services|Structure'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (24, 
'List', NULL, 3, 'serviceList', (select id from auth_resource3 where name='menu|Services|List'), 3); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (25, 'Detail', NULL, 4, 'slasServices', (select id from auth_resource3 where name='menu|Services|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (26, 'Active Transaction', NULL, 5, 'overviewServiceJSPD', (select id from auth_resource3 where name='menu|Services|Active Transaction'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (30, 'Diagnosis', '05.Diagnosis', 4, NULL, (select id from auth_resource3 where name='menu|Diagnosis'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (31, 'Anomaly Score Detail', NULL, 0, 'anomalyScoreDiagnosis', (select id from auth_resource3 where name='menu|Diagnosis|Anomaly Score'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (40, 'Statistics & Analysis', '06.Statistics&Analysis', 5, NULL, (select id from auth_resource3 where name='menu|Statistics & Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (41, 'Performance Trends', NULL, 0, 'performanceTrendSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Performance Trends'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (42, 'Alert Analysis', NULL, 2, 'alertAnalysisSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Alert Analysis'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (43, 'Alert History', NULL, 3, 'alertHistorySA', (select id from 
auth_resource3 where name='menu|Statistics & Analysis|Alert History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (44, 'Anomaly Score Analysis', NULL, 4, 'anomalyScoreSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Anomaly Score'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (45, 'Job History', NULL, 5, 'jobHistorySA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Job History'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (46, 'Sparse Log Analysis', NULL, 6, 'sparseLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (47, 'Log Viewer', NULL, 7, 'logViewerSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Log Viewer'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (48, 'eventLog Analysis', NULL, 8, 'eventLogSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Event Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (49, 'Container Life Cycle', NULL, 9, 'containerLifecycleSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Container Life Cycle'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (50, 'Service Trace Analysis', NULL, 10, 'serviceTraceSA', (select id from auth_resource3 where name='menu|Statistics & Analysis|Service Traces'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (60, 'Reports', '07.Report', 6, NULL, 
(select id from auth_resource3 where name='menu|Reports'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (61, 'Documents', NULL, 0, 'documentReport', (select id from auth_resource3 where name='menu|Reports|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (62, 'Templates', NULL, 1, 'templateReport', (select id from auth_resource3 where name='menu|Reports|Templates'), 2); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (65, 'Dashboards', '10.Dashboard', 7, NULL, (select id from auth_resource3 where name='menu|Dashboards'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (66, 'Documents', NULL, 0, 'documentDashboard', (select id from auth_resource3 where name='menu|Dashboards|Documents'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (67, 'Templates', NULL, 1, 'templateDashboard', (select id from auth_resource3 where name='menu|Dashboards|Templates'), 2); + + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (80, 'Hosts', '12.Hosts', 10, NULL, (select id from auth_resource3 where name='menu|Hosts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (81, 'Topology', null, 0, 'topologyHost', (select id from auth_resource3 where name='menu|Hosts|Topology'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (82, 'Overview', NULL, 1, 'overviewHost', (select id from auth_resource3 where name='menu|Hosts|Overview'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (83, 'List', NULL, 2, 
'listHost', (select id from auth_resource3 where name='menu|Hosts|List'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (84, 'Detail', NULL, 3, 'detailHost', (select id from auth_resource3 where name='menu|Hosts|Detail'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (85, 'Group', NULL, 4, 'groupHost', (select id from auth_resource3 where name='menu|Hosts|Group'), 0); + +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (90, 'Settings', '08.Setting', 99, NULL, (select id from auth_resource3 where name='menu|Settings'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (91, 'User', NULL, 0, 'userGroupSettings', (select id from auth_resource3 where name='menu|Settings|User & Group'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (92, 'Alerts', NULL, 1, 'alertSettings', (select id from auth_resource3 where name='menu|Settings|Alerts'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (93, 'Host Alerts', NULL, 2, 'hostAlertSettings', (select id from auth_resource3 where name='menu|Settings|Host Alerts'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (94, 'Sparse Logs', NULL, 3, 'sparseLogSettings', (select id from auth_resource3 where name='menu|Settings|Sparse Logs'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (96, 'Metric Meta', NULL, 5, 'metricMetaSettings', (select id from auth_resource3 where name='menu|Settings|Metric Meta'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (97, 
'Appearance', NULL, 6, 'appearanceSettings', (select id from auth_resource3 where name='menu|Settings|General'), 0); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (98, 'Notification', NULL, 7, 'notificationsSettings', (select id from auth_resource3 where name='menu|Settings|Notification'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (99, 'Agent', NULL, 8, 'agentSettings', (select id from auth_resource3 where name='menu|Settings|Agent'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (100, 'Alias', NULL, 9, 'aliasSettings', (select id from auth_resource3 where name='menu|Settings|Alias'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (101, 'License', NULL, 10, 'validationLicense', (select id from auth_resource3 where name='menu|Settings|License'), 2); +INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (102, 'agent Installation', NULL, 11, 'agentInstallationSettings', (select id from auth_resource3 where name='menu|Settings|Agent Installation'), 2); + +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (121, 'Health Check', '09.HealthCheck', 9, 'healthCHeck', (select id from auth_resource3 where name='menu|Health Check'), 0); +-- INSERT INTO public.menu_meta (id, description, icon, "position", url, auth_resource3_id, scope_level) VALUES (122, 'Check Script', NULL, 0, 'checkScript', (select id from auth_resource3 where name='menu|Health Check|Check Script'), 0); + +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES 
(nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Resource Usage'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Namespace'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Nodes'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Infrastructure|Node Details'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Deploy List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Cron 
Jobs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Workloads|Pods'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Topology'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Overview'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Structure'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|List'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Services|Detail'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Diagnosis|Anomaly Score'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 
'menu|Statistics & Analysis|Performance Trends'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert Analysis'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Alert History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Anomaly Score'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Job History'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Log Viewer'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Event Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Container Life Cycle'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Statistics & Analysis|Service Traces'), 'owner'); +-- +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM 
auth_resource3 WHERE NAME = 'menu|Reports'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Reports|Templates'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Documents'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Dashboards|Templates'), 'owner'); +-- +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|User & Group'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alerts'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Sparse Logs'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Metric Meta'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id 
FROM auth_resource3 WHERE NAME = 'menu|Settings|General'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Notification'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Alias'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|License'), 'owner'); +--INSERT INTO public.user_permission2 VALUES (nextval('hibernate_sequence'), now(), now(), false, 4, (SELECT id FROM auth_resource3 WHERE NAME = 'menu|Settings|Agent Installation'), 'owner'); + +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cadvisor_version_info', 'cadvisor', 'A metric with a constant ''1'' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_periods_total', 'cadvisor', 'Number of elapsed enforcement period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_periods_total', 
'cadvisor', 'Number of throttled period intervals.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_cfs_throttled_seconds_total', 'cadvisor', 'Total time duration the container has been throttled.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_load_average_10s', 'cadvisor', 'Value of container cpu load average over the last 10 seconds.', 'CPU', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_periods_total', 'cadvisor', 'Number of times processes of the cgroup have run on the cpu', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_run_seconds_total', 'cadvisor', 'Time duration the processes of the container have run on the CPU.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_schedstat_runqueue_seconds_total', 'cadvisor', 'Time duration processes of the container have been waiting on a 
runqueue.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_system_seconds_total', 'cadvisor', 'Cumulative system cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_usage_seconds_total', 'cadvisor', 'Cumulative cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_cpu_user_seconds_total', 'cadvisor', 'Cumulative user cpu time consumed in seconds.', 'CPU', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_limit_bytes', 'cadvisor', 'Number of bytes that can be consumed by the container on this filesystem.', NULL, NULL, 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_last_seen', 'cadvisor', 'Last time a container was seen by the exporter', NULL, NULL, 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, 
provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_bytes_total', 'cadvisor', 'Cumulative count of bytes received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while receiving', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_receive_packets_total', 'cadvisor', 'Cumulative count of packets received', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_bytes_total', 'cadvisor', 'Cumulative count of bytes transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_errors_total', 'cadvisor', 'Cumulative count of errors encountered while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('container_network_transmit_packets_dropped_total', 'cadvisor', 'Cumulative count of packets dropped while transmitting', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_transmit_packets_total', 'cadvisor', 'Cumulative count of packets transmitted', 'NIC', 'LOAD', 'Container', 'counter', 'interface', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_scrape_error', 'cadvisor', '1 if there was an error while getting container metrics, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_period', 'cadvisor', 'CPU period of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_quota', 'cadvisor', 'CPU quota of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_cache', 'cadvisor', 'Number of bytes of page cache memory.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failcnt', 'cadvisor', 'Number of memory usage hits limits', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_failures_total', 'cadvisor', 'Cumulative count of memory allocation failures.', 'Memory', 'LOAD', 'Container', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_max_usage_bytes', 'cadvisor', 'Maximum memory usage recorded in bytes', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_rss', 'cadvisor', 'Size of RSS in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_swap', 'cadvisor', 'Container swap usage in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_usage_bytes', 'cadvisor', 'Current memory usage in 
bytes, including all memory regardless of when it was accessed', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_memory_working_set_bytes', 'cadvisor', 'Current working set in bytes.', 'Memory', 'LOAD', 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_tcp_usage_total', 'cadvisor', 'tcp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'tcp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_network_udp_usage_total', 'cadvisor', 'udp connection usage statistic for container', 'Network', 'LOAD', 'Container', 'counter', 'udp_state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_cpu_shares', 'cadvisor', 'CPU share of the container', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_limit_bytes', 'cadvisor', 'Memory limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base 
(meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_spec_memory_swap_limit_bytes', 'cadvisor', 'Memory swap limit for the container.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_start_time_seconds', 'cadvisor', 'Start time of the container since unix epoch in seconds.', NULL, NULL, 'Container', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_tasks_state', 'cadvisor', 'Number of tasks in given state', NULL, NULL, 'Container', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds', 'prometheus', 'The HTTP request latencies in microseconds.', NULL, 'DURATION', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_duration_microseconds_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_requests_total', 'prometheus', 'Total number of scrapes by HTTP status code.', NULL, 'ERROR', 'Node', 'counter', 'code,method', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_bytes_average', 'cloudwatch', 'Bytes read from all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_count', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes_sum', 'prometheus', '', NULL, NULL, 'Node', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds', 'micrometer', 'Server Response in second', NULL, 'RATE', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_count', 'micrometer', 'the total number of requests.', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_sum', 'micrometer', 'the total time taken to serve the requests', NULL, NULL, 'Service', '', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_server_requests_seconds_max', 'micrometer', 'the max number of requests.', NULL, 'RATE', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_bytes_average', 'cloudwatch', 'Bytes written to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_loaded', 
'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_classes_unloaded_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_live_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_max_data_size_bytes', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_allocated_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_memory_promoted_bytes_total', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'summary', NULL, 
'2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_count', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_max', 'micrometer', 'jvm info', 'GC', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_gc_pause_seconds_sum', 'micrometer', 'jvm info', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_arp_entries', 'node_exporter', 'ARP entries by device', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_boot_time_seconds', 'node_exporter', 'Node boot time, in unixtime.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_context_switches_total', 'node_exporter', 'Total number of context switches.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_core_throttles_total', 'node_exporter', 'Number of times this cpu core has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'core', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_hertz', 'node_exporter', 'Current cpu thread frequency in hertz.', 'CPU', 'LOAD', 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_max_hertz', 'node_exporter', 'Maximum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_frequency_min_hertz', 'node_exporter', 'Minimum cpu thread frequency in hertz.', NULL, NULL, 'Node', 'gauge', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_guest_seconds_total', 'node_exporter', 'Seconds the cpus spent in guests (VMs) for each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_cpu_package_throttles_total', 'node_exporter', 'Number of times this cpu package has been throttled.', 'CPU', 'LOAD', 'Node', 'counter', 'package', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_cpu_seconds_total', 'node_exporter', 'Seconds the cpus spent in each mode.', 'CPU', 'LOAD', 'Node', 'counter', 'cpu,mode', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_entropy_available_bits', 'node_exporter', 'Bits of available entropy.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_exporter_build_info', 'node_exporter', 'A metric with a constant ''1'' value labeled by version, revision, branch, and goversion from which node_exporter was built.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_cpuutilization_average', 'cloudwatch', 'The percentage of allocated EC2 compute units that are currently in use on the instance.', 'CPU', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_ops_average', 'cloudwatch', 'Completed read operations from all 
instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_ops_average', 'cloudwatch', 'Completed write operations to all instance store volumes available to the instance in a specified period of time.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_read_bytes_average', 'cloudwatch', 'Bytes read from all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_disk_write_bytes_average', 'cloudwatch', 'Bytes written to all instance store volumes available to the instance.', 'Disk', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_in_average', 'cloudwatch', 'The number of bytes received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('aws_ec2_network_out_average', 'cloudwatch', 'The number of bytes sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_readonly', 'node_exporter', 'Filesystem read-only status.', NULL, NULL, 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_in_average', 'cloudwatch', 'The number of packets received on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_forks_total', 'node_exporter', 'Total number of forks.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_chip_names', 'node_exporter', 'Annotation metric for human-readable chip names', 'CPU', 'LOAD', 'Node', 'gauge', 'chip', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_fan_rpm', 'node_exporter', 'Hardware monitor for fan revolutions per minute (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', 
'2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_pwm', 'node_exporter', 'Hardware monitor pwm element ', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_sensor_label', 'node_exporter', 'Label for given chip and sensor', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_celsius', 'node_exporter', 'Hardware monitor for temperature (input)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_alarm_celsius', 'node_exporter', 'Hardware monitor for temperature (crit_alarm)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_hwmon_temp_crit_celsius', 'node_exporter', 'Hardware monitor for temperature (crit)', 'CPU', 'LOAD', 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_hwmon_temp_max_celsius', 'node_exporter', 'Hardware monitor for temperature (max)', NULL, NULL, 'Node', 'gauge', 'chip,sensor', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_intr_total', 'node_exporter', 'Total number of interrupts serviced.', 'OS', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_network_packets_out_average', 'cloudwatch', 'The number of packets sent out on all network interfaces by the instance.', 'Network', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebsread_ops_average', 'cloudwatch', 'Completed read operations from all Amazon EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('aws_ec2_ebswrite_ops_average', 'cloudwatch', 'Completed write operations to all EBS volumes attached to the instance in a specified period of time.', 'EBS', 'LOAD', 'AWS/EC2', 'gauge', 'instance_id', '2019-07-24 15:23:37.148501', '2019-07-24 15:23:37.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load1', 'node_exporter', '1m load 
average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load15', 'node_exporter', '15m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_load5', 'node_exporter', '5m load average.', 'CPU', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_completed_total', 'node_exporter', 'The total number of reads completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_reads_merged_total', 'node_exporter', 'The total number of reads merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_write_time_seconds_total', 'node_exporter', 'This is the total number of seconds spent by all writes.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_disk_writes_completed_total', 'node_exporter', 'The total number of writes completed successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_writes_merged_total', 'node_exporter', 'The number of writes merged.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_written_bytes_total', 'node_exporter', 'The total number of bytes written successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries', 'node_exporter', 'Number of currently allocated flow entries for connection tracking.', 'OS', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_nf_conntrack_entries_limit', 'node_exporter', 'Maximum size of connection tracking table.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_scrape_collector_duration_seconds', 'node_exporter', 'node_exporter: Duration of a collector scrape.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_scrape_collector_success', 'node_exporter', 'node_exporter: Whether a collector succeeded.', NULL, NULL, 'Node', 'gauge', 'collector', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_textfile_scrape_error', 'node_exporter', '1 if there was an error opening or reading a file, 0 otherwise', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_time_seconds', 'node_exporter', 'System time in seconds since epoch (1970).', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_estimated_error_seconds', 'node_exporter', 'Estimated error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_frequency_adjustment_ratio', 'node_exporter', 'Local clock frequency adjustment.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_timex_loop_time_constant', 'node_exporter', 'Phase-locked loop time constant.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_maxerror_seconds', 'node_exporter', 'Maximum error in seconds.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_offset_seconds', 'node_exporter', 'Time offset in between local system and reference clock.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_calibration_total', 'node_exporter', 'Pulse per second count of calibration intervals.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_error_total', 'node_exporter', 'Pulse per second count of calibration errors.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_frequency_hertz', 'node_exporter', 'Pulse per second frequency.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_seconds', 'node_exporter', 'Pulse per second jitter.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_jitter_total', 'node_exporter', 'Pulse per second count of jitter limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_shift_seconds', 'node_exporter', 'Pulse per second interval duration.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_exceeded_total', 'node_exporter', 'Pulse per second count of stability limit exceeded events.', NULL, NULL, 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_pps_stability_hertz', 'node_exporter', 'Pulse per second stability, average of recent frequency changes.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_status', 'node_exporter', 'Value of the status array bits.', NULL, 
NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_sync_status', 'node_exporter', 'Is clock synchronized to a reliable server (1 = yes, 0 = no).', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tai_offset_seconds', 'node_exporter', 'International Atomic Time (TAI) offset.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_timex_tick_seconds', 'node_exporter', 'Seconds between clock ticks.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_uname_info', 'node_exporter', 'Labeled system information as provided by the uname system call.', NULL, NULL, 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_oom_kill', 'node_exporter', '/proc/vmstat information field oom_kill.', NULL, 'ERROR', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('process_cpu_usage', 'micrometer', 'The "recent cpu usage" for the Java Virtual Machine process', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_uptime_seconds', 'micrometer', 'Process uptime in seconds.', NULL, NULL, 'Process', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_count', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_max', 'micrometer', 'custom service', NULL, 'DURATION', 'Service', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('service_elapsed_seconds_sum', 'micrometer', 'custom service', NULL, NULL, 'Service', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('system_cpu_usage', 'micrometer', 'The "recent cpu usage" for the whole system', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('system_load_average_1m', 'micrometer', 'The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time', 'CPU', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('up', 'prometheus', '1 if the instance is healthy, i.e. reachable, or 0 if the scrape failed.', NULL, 'ERROR', 'Any', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('go_threads', 'prometheus', 'Number of OS threads created.', 'Thread', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_request_size_bytes', 'prometheus', 'The HTTP request sizes in bytes.', 'Network', 'LOAD', 'Node', 'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('http_response_size_bytes', 'prometheus', 'The HTTP response sizes in bytes.', 'Network', 'LOAD', 'Node', 
'summary', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_count', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_buffer_total_capacity_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_committed_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_max_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_memory_used_bytes', 'micrometer', 'jvm info', 'Memory', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_daemon', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_live', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('jvm_threads_peak', 'micrometer', 'jvm info', 'Thread', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_now', 'node_exporter', 'The number of I/Os currently in progress.', 'Disk', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_seconds_total', 'node_exporter', 'Total seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_io_time_weighted_seconds_total', 'node_exporter', 'The weighted # of seconds spent doing I/Os.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_bytes_total', 'node_exporter', 'The total number of bytes read successfully.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_disk_read_time_seconds_total', 'node_exporter', 'The total number of seconds spent by all reads.', 'Disk', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filefd_allocated', 'node_exporter', 'File descriptor statistics: allocated.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filefd_maximum', 'node_exporter', 'File descriptor statistics: maximum.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_connections_total', 'node_exporter', 'The total number of connections made.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('node_ipvs_incoming_bytes_total', 'node_exporter', 'The total amount of incoming data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_incoming_packets_total', 'node_exporter', 'The total number of incoming packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_bytes_total', 'node_exporter', 'The total amount of outgoing data.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_ipvs_outgoing_packets_total', 'node_exporter', 'The total number of outgoing packets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_anon_bytes', 'node_exporter', 'Memory information field Active_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_bytes', 'node_exporter', 'Memory information field Active_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Active_file_bytes', 'node_exporter', 'Memory information field Active_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonHugePages_bytes', 'node_exporter', 'Memory information field AnonHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_AnonPages_bytes', 'node_exporter', 'Memory information field AnonPages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Bounce_bytes', 'node_exporter', 'Memory information field Bounce_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Buffers_bytes', 'node_exporter', 'Memory information field Buffers_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Cached_bytes', 'node_exporter', 'Memory 
information field Cached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaFree_bytes', 'node_exporter', 'Memory information field CmaFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CmaTotal_bytes', 'node_exporter', 'Memory information field CmaTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_CommitLimit_bytes', 'node_exporter', 'Memory information field CommitLimit_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Committed_AS_bytes', 'node_exporter', 'Memory information field Committed_AS_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap1G_bytes', 'node_exporter', 'Memory information field DirectMap1G_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap2M_bytes', 'node_exporter', 'Memory information field DirectMap2M_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_DirectMap4k_bytes', 'node_exporter', 'Memory information field DirectMap4k_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Dirty_bytes', 'node_exporter', 'Memory information field Dirty_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HardwareCorrupted_bytes', 'node_exporter', 'Memory information field HardwareCorrupted_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Free', 'node_exporter', 'Memory information field HugePages_Free.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Rsvd', 'node_exporter', 'Memory information field HugePages_Rsvd.', 'Memory', 
'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Surp', 'node_exporter', 'Memory information field HugePages_Surp.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_HugePages_Total', 'node_exporter', 'Memory information field HugePages_Total.', 'Memory', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Hugepagesize_bytes', 'node_exporter', 'Memory information field Hugepagesize_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_anon_bytes', 'node_exporter', 'Memory information field Inactive_anon_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_bytes', 'node_exporter', 'Memory information field Inactive_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_memory_Inactive_file_bytes', 'node_exporter', 'Memory information field Inactive_file_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_KernelStack_bytes', 'node_exporter', 'Memory information field KernelStack_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mapped_bytes', 'node_exporter', 'Memory information field Mapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemAvailable_bytes', 'node_exporter', 'Memory information field MemAvailable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemFree_bytes', 'node_exporter', 'Memory information field MemFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_MemTotal_bytes', 'node_exporter', 'Memory information field MemTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 
01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Mlocked_bytes', 'node_exporter', 'Memory information field Mlocked_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_NFS_Unstable_bytes', 'node_exporter', 'Memory information field NFS_Unstable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_PageTables_bytes', 'node_exporter', 'Memory information field PageTables_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Shmem_bytes', 'node_exporter', 'Memory information field Shmem_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_ShmemHugePages_bytes', 'node_exporter', 'Memory information field ShmemHugePages_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('node_memory_ShmemPmdMapped_bytes', 'node_exporter', 'Memory information field ShmemPmdMapped_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Slab_bytes', 'node_exporter', 'Memory information field Slab_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SReclaimable_bytes', 'node_exporter', 'Memory information field SReclaimable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SUnreclaim_bytes', 'node_exporter', 'Memory information field SUnreclaim_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapCached_bytes', 'node_exporter', 'Memory information field SwapCached_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapFree_bytes', 'node_exporter', 'Memory information field SwapFree_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); 
+INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_SwapTotal_bytes', 'node_exporter', 'Memory information field SwapTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Unevictable_bytes', 'node_exporter', 'Memory information field Unevictable_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocChunk_bytes', 'node_exporter', 'Memory information field VmallocChunk_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocTotal_bytes', 'node_exporter', 'Memory information field VmallocTotal_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_VmallocUsed_bytes', 'node_exporter', 'Memory information field VmallocUsed_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_Writeback_bytes', 
'node_exporter', 'Memory information field Writeback_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_memory_WritebackTmp_bytes', 'node_exporter', 'Memory information field WritebackTmp_bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InErrors', 'node_exporter', 'Statistic IcmpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_InMsgs', 'node_exporter', 'Statistic IcmpInMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp_OutMsgs', 'node_exporter', 'Statistic IcmpOutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InErrors', 'node_exporter', 'Statistic Icmp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, 
metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_InMsgs', 'node_exporter', 'Statistic Icmp6InMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Icmp6_OutMsgs', 'node_exporter', 'Statistic Icmp6OutMsgs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip_Forwarding', 'node_exporter', 'Statistic IpForwarding.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_InOctets', 'node_exporter', 'Statistic Ip6InOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Ip6_OutOctets', 'node_exporter', 'Statistic Ip6OutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_InOctets', 'node_exporter', 'Statistic IpExtInOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, 
description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_IpExt_OutOctets', 'node_exporter', 'Statistic IpExtOutOctets.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_ActiveOpens', 'node_exporter', 'Statistic TcpActiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_CurrEstab', 'node_exporter', 'Statistic TcpCurrEstab.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_InErrs', 'node_exporter', 'Statistic TcpInErrs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_PassiveOpens', 'node_exporter', 'Statistic TcpPassiveOpens.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Tcp_RetransSegs', 'node_exporter', 'Statistic TcpRetransSegs.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenDrops', 'node_exporter', 'Statistic TcpExtListenDrops.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_ListenOverflows', 'node_exporter', 'Statistic TcpExtListenOverflows.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesFailed', 'node_exporter', 'Statistic TcpExtSyncookiesFailed.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesRecv', 'node_exporter', 'Statistic TcpExtSyncookiesRecv.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_TcpExt_SyncookiesSent', 'node_exporter', 'Statistic TcpExtSyncookiesSent.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InDatagrams', 
'node_exporter', 'Statistic UdpInDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_InErrors', 'node_exporter', 'Statistic UdpInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_NoPorts', 'node_exporter', 'Statistic UdpNoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp_OutDatagrams', 'node_exporter', 'Statistic UdpOutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InDatagrams', 'node_exporter', 'Statistic Udp6InDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_InErrors', 'node_exporter', 'Statistic Udp6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('node_netstat_Udp6_NoPorts', 'node_exporter', 'Statistic Udp6NoPorts.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_Udp6_OutDatagrams', 'node_exporter', 'Statistic Udp6OutDatagrams.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite_InErrors', 'node_exporter', 'Statistic UdpLiteInErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_netstat_UdpLite6_InErrors', 'node_exporter', 'Statistic UdpLite6InErrors.', 'Network', 'LOAD', 'Node', 'counter', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_bytes_total', 'node_exporter', 'Network device statistic receive_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_compressed_total', 'node_exporter', 'Network device statistic receive_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_drop_total', 'node_exporter', 'Network device statistic receive_drop.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_errs_total', 'node_exporter', 'Network device statistic receive_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_fifo_total', 'node_exporter', 'Network device statistic receive_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_frame_total', 'node_exporter', 'Network device statistic receive_frame.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_receive_multicast_total', 'node_exporter', 'Network device statistic receive_multicast.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('node_network_receive_packets_total', 'node_exporter', 'Network device statistic receive_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_bytes_total', 'node_exporter', 'Network device statistic transmit_bytes.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_carrier_total', 'node_exporter', 'Network device statistic transmit_carrier.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_colls_total', 'node_exporter', 'Network device statistic transmit_colls.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_compressed_total', 'node_exporter', 'Network device statistic transmit_compressed.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_drop_total', 'node_exporter', 'Network device statistic transmit_drop.', 'Network', 'LOAD', 
'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_errs_total', 'node_exporter', 'Network device statistic transmit_errs.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_fifo_total', 'node_exporter', 'Network device statistic transmit_fifo.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_network_transmit_packets_total', 'node_exporter', 'Network device statistic transmit_packets.', 'Network', 'LOAD', 'Node', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_blocked', 'node_exporter', 'Number of processes blocked waiting for I/O to complete.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_procs_running', 'node_exporter', 'Number of processes in runnable state.', 'Process', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, 
entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_inuse', 'node_exporter', 'Number of FRAG sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_FRAG_memory', 'node_exporter', 'Number of FRAG sockets in state memory.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_RAW_inuse', 'node_exporter', 'Number of RAW sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_sockets_used', 'node_exporter', 'Number of sockets sockets in state used.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_alloc', 'node_exporter', 'Number of TCP sockets in state alloc.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_inuse', 'node_exporter', 'Number of TCP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem', 'node_exporter', 'Number of TCP sockets in state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_mem_bytes', 'node_exporter', 'Number of TCP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_orphan', 'node_exporter', 'Number of TCP sockets in state orphan.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_TCP_tw', 'node_exporter', 'Number of TCP sockets in state tw.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_inuse', 'node_exporter', 'Number of UDP sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem', 'node_exporter', 'Number of UDP sockets in 
state mem.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDP_mem_bytes', 'node_exporter', 'Number of UDP sockets in state mem_bytes.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_sockstat_UDPLITE_inuse', 'node_exporter', 'Number of UDPLITE sockets in state inuse.', 'Network', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_tcp_connection_states', 'node_exporter', 'Number of connection states.', 'Network', 'LOAD', 'Node', 'gauge', 'state', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgfault', 'node_exporter', '/proc/vmstat information field pgfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgmajfault', 'node_exporter', '/proc/vmstat information field pgmajfault.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, 
keys, created_date, modified_date) VALUES ('node_vmstat_pgpgin', 'node_exporter', '/proc/vmstat information field pgpgin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pgpgout', 'node_exporter', '/proc/vmstat information field pgpgout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpin', 'node_exporter', '/proc/vmstat information field pswpin.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_vmstat_pswpout', 'node_exporter', '/proc/vmstat information field pswpout.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_files_open', 'micrometer', 'The open file descriptor count', 'File', 'LOAD', 'Process', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_open_fds', 'micrometer', 'Number of open file descriptors.', 'File', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_resident_memory_bytes', 'micrometer', 'Resident memory size in bytes.', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('process_virtual_memory_bytes', 'micrometer', '-', 'Memory', 'LOAD', 'Node', 'gauge', NULL, '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_free', 'cadvisor', 'Number of available Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_inodes_total', 'cadvisor', 'Number of Inodes', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_current', 'cadvisor', 'Number of I/Os currently in progress', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_seconds_total', 'cadvisor', 'Cumulative count of seconds spent doing I/Os', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 
01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_io_time_weighted_seconds_total', 'cadvisor', 'Cumulative weighted I/O time in seconds', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_read_seconds_total', 'cadvisor', 'Cumulative count of seconds spent reading', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_bytes_total', 'cadvisor', 'Cumulative count of bytes read', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_merged_total', 'cadvisor', 'Cumulative count of reads merged', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_reads_total', 'cadvisor', 'Cumulative count of reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('container_fs_sector_reads_total', 'cadvisor', 'Cumulative count of sector reads completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_sector_writes_total', 'cadvisor', 'Cumulative count of sector writes completed', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_usage_bytes', 'cadvisor', 'Number of bytes that are consumed by the container on this filesystem.', 'Filesystem', 'LOAD', 'Container', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_write_seconds_total', 'cadvisor', 'Cumulative count of seconds spent writing', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_bytes_total', 'cadvisor', 'Cumulative count of bytes written', 'Filesystem', 'LOAD', 'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('container_fs_writes_total', 'cadvisor', 'Cumulative count of writes completed', 'Filesystem', 'LOAD', 
'Container', 'counter', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_avail_bytes', 'node_exporter', 'Filesystem space available to non-root users in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_device_error', 'node_exporter', 'Whether an error occurred while getting statistics for the given device.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files', 'node_exporter', 'Filesystem total file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_files_free', 'node_exporter', 'Filesystem total free file nodes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_free_bytes', 'node_exporter', 'Filesystem free space in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, 
resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('node_filesystem_size_bytes', 'node_exporter', 'Filesystem size in bytes.', 'Filesystem', 'LOAD', 'Node', 'gauge', 'device', '2019-05-15 01:08:44.148501', '2019-05-15 01:08:44.148501'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hitrate', 'cassandra_exporter', 'All time cache hit rate', 'Cache', 'LOAD', 'Cassandra', 'gauge', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_hits_count', 'cassandra_exporter', 'Total number of cache hits', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_cache_requests_count', 'cassandra_exporter', 'Total number of cache requests', 'Cache', 'LOAD', 'Cassandra', 'counter', 'cache', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_client_connectednativeclients', 'cassandra_exporter', 'Number of clients connected to this nodes native protocol server', 'Connection', 'LOAD', 'Cassandra', 'gauge', NULL, '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_failures_count', 'cassandra_exporter', 'Number of transaction failures encountered', 'Request', 'LOAD', 'Cassandra', 
'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_latency_seconds_count', 'cassandra_exporter', 'Number of client requests latency seconds', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_timeouts_count', 'cassandra_exporter', 'Number of timeouts encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_clientrequest_unavailables_count', 'cassandra_exporter', 'Number of unavailable exceptions encountered', 'Request', 'LOAD', 'Cassandra', 'counter', 'clientrequest', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_completedtasks', 'cassandra_exporter', 'Total number of commit log messages written', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_commitlog_totalcommitlogsize', 'cassandra_exporter', 'Current size, in bytes, used by all the commit log segments', 'Log', 'LOAD', 'Cassandra', 'counter', NULL, '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, 
provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds', 'cassandra_exporter', 'Local range scan latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_rangelatency_seconds_count', 'cassandra_exporter', 'Local range scan count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_readlatency_seconds', 'cassandra_exporter', 'Local read latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_readlatency_seconds_count', 'cassandra_exporter', 'Local read count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused', 'cassandra_exporter', 'Total disk space used belonging to this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) 
VALUES ('cassandra_keyspace_writelatency_seconds', 'cassandra_exporter', 'Local write latency seconds for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'gauge', 'keyspace,quantile', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_keyspace_writelatency_seconds_count', 'cassandra_exporter', 'Local write count for this keyspace', 'Disk', 'LOAD', 'Cassandra', 'counter', 'keyspace', '2019-10-02 10:17:01', '2019-10-02 10:17:01'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_activetasks', 'cassandra_exporter', 'Number of tasks being actively worked on', 'Task', 'LOAD', 'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_completedtasks', 'cassandra_exporter', 'Number of tasks completed', 'Task', 'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_pendingtasks', 'cassandra_exporter', 'Number of queued tasks queued up', 'Task', 'LOAD', 'Cassandra', 'gauge', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cassandra_threadpools_totalblockedtasks_count', 'cassandra_exporter', 'Number of tasks that were blocked due to queue saturation', 'Task', 
'LOAD', 'Cassandra', 'counter', 'path,threadpools', '2019-10-01 16:45:21', '2019-10-01 16:45:21'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('cloudwatch_requests_total', 'cloudwatch', 'API requests made to CloudWatch', 'API', 'LOAD', 'AWS/Usage', 'counter', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_count', 'imxc_api_server', 'the number of error counts in 5s', NULL, 'ERROR', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_errors_total', 'imxc_api_server', 'the total number of errors', NULL, 'ERROR', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_request_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_count', 'imxc_api_server', 'the number of requests counts in 5s', NULL, 'LOAD', 'Service', 'gauge', 'protocol', '2019-10-15 09:37:44', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('imxc_service_requests_milliseconds_total', 'imxc_api_server', 'the total time taken to serve the requests', NULL, 'DURATION', 'Service', 'gauge', 'protocol', '2019-12-10 11:22:00', '2019-10-15 09:37:44'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('imxc_service_requests_total', 'imxc_api_server', 'the total number of requests', NULL, 'LOAD', 'Service', 'counter', 'protocol', '2019-12-20 16:30:00', '2019-12-20 16:30:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_connections', 'mongodb_exporter', 'The number of incoming connections from clients to the database server', 'Connection', 'LOAD', 'MongoDB', 'gauge', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_client', 'mongodb_exporter', 'The number of the active client connections performing read or write operations', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_global_lock_current_queue', 'mongodb_exporter', 'The number of operations that are currently queued and waiting for the read or write lock', 'Lock', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_instance_uptime_seconds', 'mongodb_exporter', 'The number of seconds that the current MongoDB process has been 
active', 'Server', 'DURATION', 'MongoDB', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_memory', 'mongodb_exporter', 'The amount of memory, in mebibyte (MiB), currently used by the database process', 'Memory', 'LOAD', 'MongoDB', 'gauge', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_metrics_document_total', 'mongodb_exporter', 'The total number of documents processed', 'Row', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_network_bytes_total', 'mongodb_exporter', 'The number of bytes that reflects the amount of network traffic', 'Network', 'LOAD', 'MongoDB', 'counter', 'state', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mongodb_op_counters_total', 'mongodb_exporter', 'The total number of operations since the mongod instance last started', 'Request', 'LOAD', 'MongoDB', 'counter', 'type', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_aborted_connects', 'mysqld_exporter', 'The number of failed attempts to connect to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_received', 'mysqld_exporter', 'The number of bytes received from all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_bytes_sent', 'mysqld_exporter', 'The number of bytes sent to all clients', 'Network', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_commands_total', 'mysqld_exporter', 'The number of times each XXX command has been executed', 'Request', 'LOAD', 'MySQL', 'counter', 'command', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_connections', 'mysqld_exporter', 'The number of connection attempts (successful or not) to the MySQL server', 'Connection', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests', 'mysqld_exporter', 'The number of logical read requests', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES 
('mysql_global_status_innodb_buffer_pool_write_requests', 'mysqld_exporter', 'The number of writes done to the InnoDB buffer pool', 'Block', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_read', 'mysqld_exporter', 'The amount of data read since the server was started (in bytes)', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_reads', 'mysqld_exporter', 'The total number of data reads (OS file reads)', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_writes', 'mysqld_exporter', 'The total number of data writes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_data_written', 'mysqld_exporter', 'The amount of data written so far, in bytes', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_write_requests', 'mysqld_exporter', 'The number of write requests for the InnoDB redo log', 'Log', 'LOAD', 'MySQL', 'counter', 
'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_log_writes', 'mysqld_exporter', 'The number of physical writes to the InnoDB redo log file', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_os_log_written', 'mysqld_exporter', 'The number of bytes written to the InnoDB redo log files', 'Disk', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits', 'mysqld_exporter', 'The number of row locks currently being waited for by operations on InnoDB tables', 'Lock', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_time', 'mysqld_exporter', 'The total time spent in acquiring row locks for InnoDB tables, in milliseconds', 'Lock', 'DURATION', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits', 'mysqld_exporter', 'The number of times operations on InnoDB tables had to wait for a row lock', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 
16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_innodb_row_ops_total', 'mysqld_exporter', 'The number of rows operated in InnoDB tables', 'Row', 'LOAD', 'MySQL', 'counter', 'operation', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_table_locks_immediate', 'mysqld_exporter', 'The number of times that a request for a table lock could be granted immediately', 'Lock', 'LOAD', 'MySQL', 'counter', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_threads_connected', 'mysqld_exporter', 'The number of currently open connections', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_threads_running', 'mysqld_exporter', 'The number of threads that are not sleeping', 'Thread', 'LOAD', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('mysql_global_status_uptime', 'mysqld_exporter', 'The number of seconds that the server has been up', 'Server', 'DURATION', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, 
modified_date) VALUES ('mysql_up', 'mysqld_exporter', 'Whether the last scrape of metrics from MySQL was able to connect to the server', 'NULL', 'ERROR', 'MySQL', 'gauge', 'NULL', '2019-12-04 16:45:00', '2019-12-04 16:45:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_locks_count', 'postgres_exporter', 'Number of locks', 'Lock', 'LOAD', 'PostgreSQL', 'gauge', 'datname,mode', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_activity_count', 'postgres_exporter', 'number of connections in this state', 'Connection', 'LOAD', 'PostgreSQL', 'gauge', 'datname,state', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_read_time', 'postgres_exporter', 'Time spent reading data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blk_write_time', 'postgres_exporter', 'Time spent writing data file blocks by backends in this database, in milliseconds', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_hit', 'postgres_exporter', 'Number of times disk blocks were found already in the buffer cache', 
'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_blks_read', 'postgres_exporter', 'Number of disk blocks read in this database', 'Block', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_bytes', 'postgres_exporter', 'Total amount of data written to temporary files by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_temp_files', 'postgres_exporter', 'Number of temporary files created by queries in this database', 'TemporaryFile', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_deleted', 'postgres_exporter', 'Number of rows deleted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_fetched', 'postgres_exporter', 'Number of rows fetched by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO 
public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_inserted', 'postgres_exporter', 'Number of rows inserted by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_returned', 'postgres_exporter', 'Number of rows returned by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_tup_updated', 'postgres_exporter', 'Number of rows updated by queries in this database', 'Row', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_commit', 'postgres_exporter', 'Number of transactions in this database that have been committed', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, created_date, modified_date) VALUES ('pg_stat_database_xact_rollback', 'postgres_exporter', 'Number of transactions in this database that have been rolled back', 'Transaction', 'LOAD', 'PostgreSQL', 'counter', 'datname', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); +INSERT INTO public.metric_base (meta_name, provider, description, resource_type, diag_type, entity_type, metric_type, keys, 
created_date, modified_date) VALUES ('pg_up', 'postgres_exporter', 'Whether the last scrape of metrics from PostgreSQL was able to connect to the server', 'NULL', 'ERROR', 'PostgreSQL', 'gauge', 'NULL', '2019-08-27 14:07:00', '2019-08-27 14:07:00'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816000, '2019-08-19 06:14:22.616', '2019-08-19 06:14:22.616', false, 4, (select id from auth_resource2 where type='menu' and name='Infrastructure' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816001, '2019-08-19 06:14:22.635', '2019-08-19 06:14:22.635', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816002, '2019-08-19 06:14:22.638', '2019-08-19 06:14:22.638', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Infrastructure')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816003, '2019-08-19 06:14:22.64', '2019-08-19 06:14:22.64', false, 4, (select id from auth_resource2 where type='menu' and name='Namespace' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816004, '2019-08-19 06:14:22.643', '2019-08-19 06:14:22.643', false, 4, (select id from auth_resource2 where type='menu' and name='Nodes' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) 
VALUES (3816005, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Node Details' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816006, '2019-08-19 06:14:22.72', '2019-08-19 06:14:22.72', false, 4, (select id from auth_resource2 where type='menu' and name='Resource Usage' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816009, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Persistent Volume' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816100, '2019-08-19 06:14:22.619', '2019-08-19 06:14:22.619', false, 4, (select id from auth_resource2 where type='menu' and name='Workloads' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816105, '2019-08-19 06:14:22.657', '2019-08-19 06:14:22.657', false, 4, (select id from auth_resource2 where type='menu' and name='Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816106, '2019-08-19 06:14:22.66', '2019-08-19 06:14:22.66', false, 4, (select id from auth_resource2 where type='menu' and name='Cron Jobs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816107, '2019-08-19 06:14:22.646', '2019-08-19 06:14:22.646', false, 4, (select id from auth_resource2 where type='menu' and name='Pods' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, 
auth_resource_id, user_id) VALUES (3816200, '2019-08-19 06:14:22.621', '2019-08-19 06:14:22.621', false, 4, (select id from auth_resource2 where type='menu' and name='Services' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816201, '2019-08-19 06:14:22.698', '2019-08-19 06:14:22.698', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816202, '2019-08-19 06:14:22.728', '2019-08-19 06:14:22.728', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816203, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Services')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816300, '2019-08-19 06:14:22.624', '2019-08-19 06:14:22.624', false, 4, (select id from auth_resource2 where type='menu' and name='Diagnosis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816301, '2019-08-19 06:14:22.705', '2019-08-19 06:14:22.705', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Diagnosis') ) , 'admin'); +-- INSERT INTO 
public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816309, '2019-08-19 06:14:22.668', '2019-08-19 06:14:22.668', false, 4, (select id from auth_resource2 where type='menu' and name='Troubleshooting' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816400, '2019-08-19 06:14:22.627', '2019-08-19 06:14:22.627', false, 4, (select id from auth_resource2 where type='menu' and name='Statistics & Analysis') , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816401, '2019-08-19 06:14:22.671', '2019-08-19 06:14:22.671', false, 4, (select id from auth_resource2 where type='menu' and name='Performance Trends' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816402, '2019-08-19 06:14:22.731', '2019-08-19 06:14:22.731', false, 4, (select id from auth_resource2 where type='menu' and name='Alert Analysis' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816403, '2019-08-19 06:14:22.674', '2019-08-19 06:14:22.674', false, 4, (select id from auth_resource2 where type='menu' and name='Alert History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816404, '2019-08-19 06:14:22.677', '2019-08-19 06:14:22.677', false, 4, (select id from auth_resource2 where type='menu' and name='Anomaly Score' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816405, 
'2019-08-19 06:14:22.679', '2019-08-19 06:14:22.679', false, 4, (select id from auth_resource2 where type='menu' and name='Job History' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816406, '2019-08-19 06:14:22.685', '2019-08-19 06:14:22.685', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816407, '2019-08-19 06:14:22.682', '2019-08-19 06:14:22.682', false, 4, (select id from auth_resource2 where type='menu' and name='Log Viewer' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816408, '2019-08-19 06:14:22.725', '2019-08-19 06:14:22.725', false, 4, (select id from auth_resource2 where type='menu' and name='Event Logs' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816409, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Container Life Cycle' and parent_id=(select id from auth_resource2 where type='menu' and name='Statistics & Analysis')) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816500, '2019-08-19 06:14:22.629', '2019-08-19 06:14:22.629', false, 4, (select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816501, '2019-08-19 06:14:22.734', 
'2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816502, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Reports' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816550, '2019-08-19 06:14:22', '2019-08-19 06:14:22', false, 4, (select id from auth_resource2 where type='menu' and name='Dashboards' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816551, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Documents' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816552, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Templates' and parent_id=(select id from auth_resource2 where type='menu' and name='Dashboards' and parent_id is null)) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816700, '2019-08-19 06:14:22.632', '2019-08-19 06:14:22.632', false, 4, (select id from auth_resource2 where type='menu' and name='Settings' ) , 'admin'); +-- INSERT INTO 
public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816701, '2019-08-19 06:14:22.687', '2019-08-19 06:14:22.687', false, 4, (select id from auth_resource2 where type='menu' and name='User & Group' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816702, '2019-08-19 06:14:22.69', '2019-08-19 06:14:22.69', false, 4, (select id from auth_resource2 where type='menu' and name='Alert' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816703, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Host Alerts' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816704, '2019-08-19 06:14:22.693', '2019-08-19 06:14:22.693', false, 4, (select id from auth_resource2 where type='menu' and name='Sparse Logs' and parent_id=(select id from auth_resource2 where type='menu' and name='Settings' )) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816706, '2019-08-19 06:14:22.717', '2019-08-19 06:14:22.717', false, 4, (select id from auth_resource2 where type='menu' and name='Metric Meta' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816707, '2019-08-19 06:14:22.696', '2019-08-19 06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='Notification' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816708, '2019-08-19 06:14:22.696', '2019-08-19 
06:14:22.696', false, 4, (select id from auth_resource2 where type='menu' and name='General' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816709, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='License' ) , 'admin'); + +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816800, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Hosts' ) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816801, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Topology' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816802, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Overview' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816803, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='List' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816804, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id 
from auth_resource2 where type='menu' and name='Detail' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); +-- INSERT INTO public.user_permission2 (id, created_date, modified_date, all_child, permission, auth_resource_id, user_id) VALUES (3816805, '2019-08-19 06:14:22.734', '2019-08-19 06:14:22.734', false, 4, (select id from auth_resource2 where type='menu' and name='Group' and parent_id=(select id from auth_resource2 where type='menu' and name='Hosts')) , 'admin'); + + + + +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (97, '2019-04-02 18:07:31.319', '2019-04-02 18:07:31.319', 'NODE CPU 사용', '(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m])) * 100))', 'Node CPU Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id }} CPU 사용률이 {threshold}%를 초과했습니다. 현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (1, '2019-04-15 02:26:13.826', '2019-04-15 02:26:24.02', 'NODE Disk 사용', '(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', {filter} }))) * 100', 'Node Disk Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Disk 사용률이 {threshold}%를 초과했습니다. 
현재값:{{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (119, '2019-04-02 18:08:50.17', '2019-04-02 18:08:50.17', 'NODE Memory 사용', '(1- ((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node''})) * 100', 'Node Memory Usage', 'node', 'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Memory 사용률이 {threshold}%를 초과했습니다. 현재값 : {{humanize $value}}%'); +INSERT INTO public.alert_rule_meta ( id, created_date, modified_date, description, expr, meta_name, target, message ) VALUES (2, '2019-04-15 05:27:56.544', '2019-04-15 05:27:59.924', 'Container CPU 사용', 'sum (rate (container_cpu_usage_seconds_total{ {filter} }[1m])) by (xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id) * 100', 'Container CPU Usage', 'controller', 'Cluster:{{$labels.xm_clst_id }} POD:{{$labels.xm_pod_id}} CPU 사용률이 {threshold}%를 초과했습니다. 
현재값:{{humanize $value}}%'); + + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_user','Container CPU User (%)','Container CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_working_set_bytes','Container Memory Working Set (GiB)','Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_working_set_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_io_seconds','Host io Disk seconds','Host disk io seconds','sum by (instance) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Disk IO Seconds:{{humanize $value}}|{threshold}.','2020-03-23 04:08:37.359','2020-03-23 04:08:37.359'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_write_byte','host disk R/W byte','host disk R/W byte','sum by (data_type, instance) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]) or rate(node_disk_read_bytes_total{{filter}}[5m]), "data_type", "Read", "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]) or rate(node_disk_written_bytes_total{{filter}}[5m]), "data_type", "Write", "", "") )','Disk','Host',NULL,false,false,'Host:{{$labels.instance}} Read/Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2020-03-24 05:21:53.915','2020-03-24 05:24:52.674'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_free','Host Memory Free (GiB)','Memory information field MemFree_bytes','(node_memory_MemAvailable_bytes{{filter}} or (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:18.977','2020-03-23 04:08:18.977'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_sent','Number of Bytes Sent','The number of bytes sent to all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_sent[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Sent:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_namespace','Containe memory sum by namespace','Containe memory sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','memory','Namespace',NULL,false,false,'Container memory sum by namespace','2020-07-03 04:31:10.079','2020-07-03 08:38:17.034'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_count','Node Count','node count','count by(xm_clst_id, xm_namespace,xm_node_id) (up{{filter}})','Node','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} NODE:{{$labels.xm_node_id}} Node Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_restart_count','Container Restart Count','container restart count group by namespace','sum by(xm_clst_id, xm_namespace, pod_name ) (increase(imxc_kubernetes_container_restart_count{{filter}}[10s]))','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container Restart Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_usage','Node CPU Usage (%)','NODE CPU Usage','(100 - (avg by (xm_clst_id, xm_node_id, xm_entity_type)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0) * 100)))','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency_device','Node Disk Read Latency per Device (ms)','Node Disk Read Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage_per_device','Node Filesystem Usage per device (%)','NODE Filesystem Usage per Device','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_memory_usage','Node Memory Usage (%)','Node Memory Usage','sum by (xm_clst_id, xm_node_id)((node_memory_MemTotal_bytes{xm_entity_type="Node"}- (node_memory_MemFree_bytes{xm_entity_type="Node"} + node_memory_Cached_bytes{xm_entity_type="Node"} + node_memory_Buffers_bytes{xm_entity_type="Node"})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node"}- node_memory_MemFree_bytes{xm_entity_type="Node"}) / (sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})) * 100','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_tablespace_size','Tablespace Size (GiB)','Generic counter metric of tablespaces bytes in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, tablespace, type) (oracledb_tablespace_bytes) / 1073741824','Tablespace','OracleDB','tablespace, type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Tablespace Size:{{humanize $value}}GiB|{threshold}GiB.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_allocated_size','Allocated Memory (MiB)','The total amount of memory that the Redis allocator allocated','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_allocated_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Allocated Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_kubernetes_event_count','Cluster events count','Kubernetes Namespace Events count','sum by (xm_clst_id, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Event Count:{{humanize $value}}|{threshold}.','2019-09-26 05:33:37.000','2020-04-27 05:38:47.804'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_limit','cluster_memory_limit (Gib)','Total container limit size in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Limits:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_total_count','Cluster Pod Total Count','Cluster Pod Total Count','sum by (xm_clst_id) (imxc_kubernetes_controller_counts{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Total Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_free','Host Swap Memory Free','Host Swap Free','node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Free Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:24.594','2020-03-23 04:08:24.594'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_context_switch_count','Host Context','Total number of context switches.','sum by (instance) (node_context_switches_total{{filter}})','CPU','Host',NULL,false,false,'None','2020-03-23 04:08:15.000','2020-03-23 04:08:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_used','Host system Filesystem used','Host File system used','sum by (instance) (node_filesystem_size_bytes{{filter}}-node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Utillization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:30.407','2020-03-23 04:08:30.407'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_io','Node Disk I/O','Total seconds spent doing I/Os','avg by (xm_clst_id, xm_node_id) (rate(node_disk_io_time_seconds_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:55.992'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage','Container Filesystem Usage (%)','Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_reads','Container Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_namespace','Container cpu sum by namespace','Container cpu sum by namespace','sum by(xm_clst_id, xm_namespace, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Namespace',NULL,false,false,'.','2020-05-30 08:30:10.158','2020-06-09 02:00:50.856'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size','Node Filesystem Available Size (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', 
{filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_running_count','Node Pod Running Count','Node Pod Running Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Running Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-06 08:02:40.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_user','Pod CPU User (%)','Pod CPU Usage (User)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU User:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_reads','Pod Filesystem Read Bytes (KiB)','Cumulative count of bytes read / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_reads_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Read Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:53:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_max_usage_bytes','Pod Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_max_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Max Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_receive','Pod Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Receive:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hits_count','Total number of cache hits (count/s)','Total number of cache hits','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_hits_count{{filter}}[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Counts per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:24:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_clientrequest_failures_count','Number of transaction failures encountered','Number of transaction failures encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_failures_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Failure Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_connections_and_tasks','Cassandra connections & tasks','cassandra connections & tasks','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "data_type", "Active tasks", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "data_type", "Pending tasks", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "data_type", "Client connections", "", "") )','Connection','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Connections and Tasks:{{humanize $value}}|{threshold}.','2020-01-02 09:11:48.000','2020-02-13 01:24:51.522'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_transmit','Pod Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Network Transmit:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 
03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_request','cluster_memory_request (Gib)','Total container memory request in GiB for the given cluster','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Cluster',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_count','Local read count (count/s)','Local read count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_readlatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','cluster_cpu_capacity_cores','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_cpu{{filter}})','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Cluster CPU Capacity Cores:{{humanize $value}}|{threshold}.','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_alerts_received_count','Cluster alerts received count','Alert count by cluster','sum by (xm_clst_id, level) (ceil(increase(imxc_alerts_received_count_total{status=''firing'', 
{filter}}[10m])))','Alert','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Alert Received Counts:{{humanize $value}}|{threshold}.','2019-08-23 04:41:49.000','2020-04-28 08:09:09.429'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_throttled_time','Container CPU Throttled Time','container cpu_throttled time','sum by(xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) (increase(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="", {filter}}[10s]))','CPU','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_hitrate','All time cache hit rate','All time cache hit rate','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (cassandra_cache_hitrate {{filter}} * 100)','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-12-13 01:19:54.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_read_bytes','Bytes Read from All Instance Store Volumes (KiB)','Bytes read from all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_read_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_disk_write_bytes','Bytes Written to All Instance Store Volumes (KiB)','Bytes written to all instance store volumes available to the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_disk_write_bytes_average{{filter}}) / 1024','Disk','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebswrite_bytes','Bytes written to all EBS volumes (KiB)','Bytes written to all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebswrite_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_cache_requests_count','Total number of cache requests (count/s)','Total number of cache requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cache) (rate(cassandra_cache_requests_count[1m]))','Cache','Cassandra','cache',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('cassandra_keyspace_write_latency','Local write latency (ms)','Local write latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_usage','Cluster Memory Usage (%)','All Nodes Memory Usage in cluster.','(1- avg by (xm_clst_id) (((node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Cached_bytes{xm_entity_type=''Node'', {filter}} + node_memory_Buffers_bytes{xm_entity_type=''Node'', {filter}}) <= node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} or node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}}) / node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}})) * 100','Memory','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-07-18 06:12:22.000','2020-04-22 04:59:14.251'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections_metrics_created_total','Incoming Connections Created','Count of all incoming connections created to the server (This number includes connections that have since closed)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_connections_metrics_created_total[1m]))','Connection','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Incoming Connections Created Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_disk_io','MySQL Disk I/O','MySQL Disk I/O','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_data_read[1m]), "data_type", "read", "", "") or +label_replace(rate(mysql_global_status_innodb_data_written[1m]), "data_type", "written", "", ""))','Disk','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} SVC:{{$labels.xm_service_name}} Mysql Disk IO:{{humanize $value}}|{threshold}.','2019-12-05 08:48:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_capacity_count','Cluster Pod Capacity Count','Cluster Pod Capacity Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Capacity Pod Counts:{{humanize $value}}|{threshold}.','2019-08-27 04:45:52.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_kubernetes_event_count','Namespace events count','Kubernetes Namespace Events count','sum by (xm_clst_id, xm_namespace, type) (imxc_kubernetes_event_in_last_min{{filter}})','Event','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Events:{{humanize $value}}|{threshold}.','2019-09-24 06:42:09.000','2019-09-24 06:42:34.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_cpu_capacity_cores','node_cpu_capacity_cores','node_cpu_capacity_cores','imxc_kubernetes_node_resource_capacity_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_allocatable_cores','node_cpu_allocatable_cores','node_cpu_allocatable_cores','imxc_kubernetes_node_resource_allocatable_cpu{{filter}}','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_capacity_count','Node Pod Capacity Count','Node Pod Capacity Count','imxc_kubernetes_node_resource_capacity_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Capacity Count of Pods:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_allocatable','node_memory_allocatable (Gib)','imxc_kubernetes_node_resource_allocatable_memory in GiB','imxc_kubernetes_node_resource_allocatable_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_limit','node_memory_limit (Gib)','Total container memory limit for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1024 / 1024 / 
1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_readwritelatency_seconds','Cassandra Read/Write Latency (ms)','Cassandra Read/Write Latency (ms)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) or (cassandra_keyspace_writelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Cassandra Keyspace Readwritelatency Seconds:{{humanize $value}}ms|{threshold}ms.','2019-10-23 01:46:07.000','2019-11-05 09:03:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_usage','Cluster CPU Usage (%)','All Nodes CPU Usage in cluster.','(100 - (avg by (xm_clst_id)(clamp_max(rate(node_cpu_seconds_total{ name=''node-exporter'', mode=''idle'', xm_entity_type=''Node'', {filter} }[1m]),1.0)) * 100))','CPU','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-07-18 05:54:39.000','2020-04-22 04:59:14.253'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_bytes_received','Number of Bytes Received','The number of bytes received from all clients','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_bytes_received[1m]))','Network','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Received:{{humanize $value}}KiB|{threshold}KiB.','2019-12-04 16:45:00.000','2019-12-04 
16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_request','node_memory_request (Gib)','Total container memory request in GiB for the given cluster, node','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:45:47.000','2019-08-23 08:45:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_tasks','Number of tasks','Number of tasks','sum by (task_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(cassandra_threadpools_activetasks {{filter}}, "task_type", "active", "", "") or +label_replace(cassandra_threadpools_pendingtasks {{filter}}, "task_type", "pending", "", "") or +label_replace(cassandra_client_connectednativeclients {{filter}}, "task_type", "connected", "", "") )','Task','Cassandra','task_type',true,false,'Number of tasks','2019-10-24 01:34:25.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_latency_seconds','Local latency seconds','Local latency seconds','sum by(type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(cassandra_keyspace_readlatency_seconds{quantile=''0.99'', {filter}}, "type", "read", "", "") or +label_replace(cassandra_keyspace_writelatency_seconds{quantile=''0.99'', {filter}}, "type", "write", "", "")) * 1000','Disk','Cassandra',NULL,true,true,'Local latency seconds','2019-10-24 02:14:45.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_concurrency','Wait-Time - Concurrency','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_concurrency[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Concurrency:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_pendingtasks','Number of queued tasks queued up','Number of queued tasks queued up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_pendingtasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Active Task:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_ready_count','Cluster Pod Ready Count','Cluster Pod Ready Count','sum by (xm_clst_id) (imxc_kubernetes_controller_ready{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Ready Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_allocatable_count','Node Pod Allocatable Count','Node Pod Allocatable Count','imxc_kubernetes_node_resource_allocatable_pods{{filter}}','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} 
Allocatable Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_conatiner_count','Container Type Sparselog Count','Container-type sparse log count by xm_clst_id, xm_namespace, xm_node_id, xm_pod_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_namespace, xm_node_id, xm_pod_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Pod",{filter}}[1m])))','SparseLog','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_connected','Number of Open Connections','The number of currently open connections','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_connected)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Open Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_ebsread_bytes','Bytes read from all EBS volumes (KiB)','Bytes read from all EBS volumes attached to the instance in a specified period of time.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_ebsread_bytes_average{{filter}}) / 1024','EBS','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} EBS Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 
17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_cpu_usage','Namespace CPU Usage (%)','CPU Usage by namespace','sum by (xm_clst_id,xm_entity_type,xm_namespace) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'', {filter}}[1m])) * 100','CPU','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} CPU Utillization:{{humanize $value}}%|{threshold}%','2019-08-23 01:06:05.000','2019-08-23 01:06:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_memory_usage','Namespace memory usage (Gib)','Memory usage by namespace in bytes / 1073741824','sum by (xm_clst_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'', {filter}}) / 1073741824','Memory','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Memory Utillization:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 01:21:31.000','2019-08-23 01:21:31.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_free','Node Memory Free (GiB)','Memory information field MemFree_bytes / 1073741824','node_memory_MemFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_cached','Node Swap Memory Cached (GiB)','Memory information field SwapCached_bytes / 
1073741824','node_memory_SwapCached_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Cached Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_active_size','Active Memory (MiB)','The total amount of active memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_active_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Active Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_up','MySQL Up Count','Whether the last scrape of metrics from MySQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_up)','Instance','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Up counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_up','Oracle DB Up Count','Whether the Oracle database server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_up)','Instance','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle DB Up Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_process_count','Process Count','Gauge metric with count of processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (oracledb_process_count)','Process','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Process Count Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_locks_count','Number of Locks','Number of locks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, mode) (pg_locks_count)','Lock','PostgreSQL','datname,mode',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Lock Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_updated','Number of Rows Updated','Number of rows updated by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_updated[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Updated Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_deleted','Number of Rows Deleted','Number of rows deleted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_tup_deleted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Deleted Row counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_files','Number of Temporary Files Created','Number of temporary files created by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_files[1m]))','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load15','Node CPU Load 15m Average','Node CPU 15m load average','node_load15{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 15m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:27:39.000','2019-05-15 08:27:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_throttling','Node CPU Throttling','Number of times this cpu package has been throttled.','increase(node_cpu_package_throttles_total{xm_entity_type=''Node'',{filter}}[1m])','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU Throttling Counts:{{humanize $value}}|{threshold}.','2019-05-15 08:29:24.000','2019-05-15 08:29:24.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_usage','Pod CPU Usage (%)','Pod CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_system','Pod CPU System (%)','Pod CPU Usage (System)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',{filter}}[1m])) * 100','CPU','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage_bytes','Pod Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Used Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_limit_bytes','Pod Filesystem Limit Bytes (GiB)','Number of 
bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Limit Bytes:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load5','Node CPU Load 5m Average','Node CPU 5m load average','node_load5{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 5m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:26:07.000','2019-05-15 08:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_client_connectednativeclients','Number of Client Connections','Number of clients connected to this nodes native protocol server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_client_connectednativeclients)','Connection','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-07 11:59:04.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_activetasks','Number of tasks being actively worked on','Number of tasks being actively worked on','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (cassandra_threadpools_activetasks)','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Connection:{{humanize 
$value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cloudwatch_requests_count','API requests made to CloudWatch','API requests made to CloudWatch','sum by (xm_clst_id, namespace, action) (rate(cloudwatch_requests_total{{filter}}[10m]))','Request','AWS/Usage',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.namespace}} CloudWatch API Call Volume:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_out','Bytes Sent Out on All Network Interfaces (KiB)','The number of bytes sent out on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_out_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_network_in','Bytes Received on All Network Interfaces (KiB)','The number of bytes received on all network interfaces by the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_network_in_average{{filter}}) / 1024','Network','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_count','Namespace Pod Count','Pod count by namesapce','count (sum (container_last_seen{{filter}}) by (xm_clst_id, xm_namespace, xm_pod_id)) by (xm_clst_id, xm_namespace)','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Pod Counts:{{humanize $value}}|{threshold}.','2019-08-22 16:53:32.000','2019-08-23 01:06:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_usage','Node Filesystem Usage (%)','NODE Filesystem Usage','(1- (sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }))) * 100','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-05-15 01:02:23.000','2019-05-15 01:02:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_available','Node Memory Available (GiB)','Memory information field MemAvailable_bytes / 1073741824','node_memory_MemAvailable_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Avail Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_total','Node Memory Total (GiB)','Memory information field MemTotal_bytes 
/ 1073741824','node_memory_MemTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive','Node Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:07:46.000','2019-05-31 17:45:22.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit','Node Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 09:09:05.000','2019-05-31 17:46:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_allocated_count','Cluster Pod Allocated Count','Cluster Pod Allocated Count','sum by (xm_clst_id) (imxc_kubernetes_node_resource_allocatable_pods{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Allocated Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 17:36:00.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_pod_desired_count','Cluster Pod Desired Count','Cluster pod desired count by controller','sum by (xm_clst_id) (imxc_kubernetes_controller_replicas{{filter}})','Pod','Cluster',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Desired Pod Counts:{{humanize $value}}|{threshold}.','2019-08-23 02:26:55.000','2019-11-28 08:25:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_commands_total','Number of Commands Executed','The number of times each XXX command has been executed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, command) (rate(mysql_global_status_commands_total[1m]) > 0)','Request','MySQL','command',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Commands Executed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-12 08:20:06.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_threads_running','Number of Threads Running','The number of threads that are not sleeping','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_threads_running)','Thread','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_dbname_state','Count by dbname and state in pg','count by dbname and state in pg','sum by (xm_clst_id, xm_namespace, 
xm_node_id, instance, state) (pg_stat_activity_count)','Connection','PostgreSQL','state',true,false,'count by dbname and state in pg','2020-01-30 06:10:54.000','2020-01-31 11:33:41.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_alerts_received_count','Namespace alerts received count','Alert count by namespace','sum by (xm_clst_id, xm_namespace, level) (floor(increase(imxc_alerts_received_count_total{status=''firing'', {filter}}[10m])))','Alert','Namespace','level',false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Alert Count:{{humanize $value}}|{threshold}.','2019-08-23 04:43:29.000','2019-08-23 04:43:29.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_reads_count_device','Node Disk Reads Count per Device (IOPS)','Node Disk Reads Count per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_reads_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Reads Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_latency','Node Disk Read Latency (ms)','Node Disk Read Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_read_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-05-20 10:59:07.000','2019-05-31 17:46:54.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency_device','Node Disk Write Latency per Device (ms)','Node Disk Write Latency per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Latency:{{humanize $value}}ms|{threshold}ms.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes','Node Disk Write Bytes (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_avail_size_device','Node Filesystem Available Size per Device (GiB)','Filesystem space available to non-root users in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_avail_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Avail Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size_device','Node Filesystem Free Size per Device (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size_device','Node Filesystem Total Size per Device (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, fs_type, mountpoint) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node','device,fs_type',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_free','Node Swap Memory Free (GiB)','Memory information field SwapFree_bytes / 1073741824','node_memory_SwapFree_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Free Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_swap_memory_total','Node Swap Memory Total (GiB)','Memory information field SwapTotal_bytes / 1073741824','node_memory_SwapTotal_bytes{xm_entity_type=''Node'', {filter}} / 1073741824','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Total Swap Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 16:03:00.000','2019-06-04 16:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_up','PostgreSQL Up Count','Whether the last scrape of metrics from PostgreSQL was able to connect to the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (pg_up)','Instance','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Instance Count:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_write_requests','Number of Writes to Buffer Pool','The number of writes done to the InnoDB buffer pool','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_write_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Writes to Buffer Pool Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_read_requests','Number of Logical Read Requests','The number of logical 
read requests','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_read_requests[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Logical Read Requests Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_read','Amount of Data Read','The amount of data read since the server was started (in bytes)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_data_read[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Read Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_os_log_written','Number of Bytes Written to Redo Log','The number of bytes written to the InnoDB redo log files','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_os_log_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Bytes Written to Redo Log Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_data_written','Amount of Data Written','The amount of data written so far, in bytes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(mysql_global_status_innodb_data_written[1m]))','Disk','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Amount of Data Written Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pod','Container Memory Request/Limits vs Used by Pod','container_memory_sum_by_pod','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type) ( +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,true,false,'Container memory sum by pod (limit, request, used)','2020-07-22 21:44:33.000','2020-07-22 21:44:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_cache_hit_ratio','Buffer Cache Hit Ratio','Buffer Cache Hit Ratio','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ( +(1 - increase(mysql_global_status_innodb_buffer_pool_reads [1h]) / increase(mysql_global_status_innodb_buffer_pool_read_requests [1h])) * 100)','Block','MySQL',NULL,true,false,'.','2019-12-05 07:47:50.000','2019-12-13 01:17:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_cluster','Container CPU Request/Limits vs Used by Cluster','Container cpu sum by cluster (capacity, limit, request, usage)','sum by(xm_clst_id, data_type) ( 
+label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} *0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})*0.001, "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})*0.001, "data_type", "request", "" , "") or +label_replace(sum by(xm_clst_id)(rate(container_cpu_usage_seconds_total{{filter}}[1m])), "data_type", "used", "" , ""))','CPU','Cluster',NULL,true,false,'Container cpu sum by cluster','2020-07-22 17:49:53.000','2020-07-22 17:49:53.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_total_size','Node Filesystem Total Size (GiB)','Filesystem size in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_size_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Total Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_filesystem_free_size','Node Filesystem Free Size (GiB)','Filesystem free space in bytes / 1073741824','sum by (xm_clst_id, xm_node_id, xm_entity_type) (node_filesystem_free_bytes{xm_entity_type=''Node'', device!=''rootfs'', {filter} }) / 1073741824','Filesystem','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Filesystem Free Size:{{humanize $value}}GiB|{threshold}GiB.','2019-06-04 19:47:00.000','2019-06-04 19:47:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('container_cpu_sum_by_pod','Container CPU Request/Limits vs Used by Pod','Container cpu sum by pod (capacity, limit, request, usage)','sum by(xm_clst_id, xm_namespace, xm_node_id, xm_pod_id, xm_cont_name, data_type)( +label_replace (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m]), "data_type", "used", "", "") or +label_replace (imxc_kubernetes_container_resource_limit_cpu{{filter}}*0.001, "data_type", "limit", "", "") or +label_replace (imxc_kubernetes_container_resource_request_cpu{{filter}}*0.001, "data_type", "request", "", "") +)','CPU','Pod',NULL,true,false,'Container cpu sum by Pod','2020-07-22 21:37:45.000','2020-07-22 21:37:45.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_count_by_lockmode','Count_by_lockmode','Count by lockmode','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, mode) (pg_locks_count)','Lock','PostgreSQL','mode',true,false,'Count by lockmode','2020-01-30 07:06:13.000','2020-01-30 07:06:47.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_current_waits','Number of Row Locks ','The number of row locks currently being waited for by operations on InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (mysql_global_status_innodb_row_lock_current_waits)','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_memory_capacity','cluster_memory_capacity 
(Gib)','imxc_kubernetes_node_resource_capacity_memory','sum by (xm_clst_id) (imxc_kubernetes_node_resource_capacity_memory{{filter}})','Memory','Cluster',NULL,false,false,'CLST:{{$labels.xm_clst_id}} Memory Capacity:{{humanize $value}}GiB|{threshold}GiB.','2019-08-23 08:46:58.000','2020-05-27 09:05:56.427'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_free','Host system Filesystem free','Host File system free','sum by (instance) (node_filesystem_free_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Free Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:29.025','2020-03-23 04:08:29.025'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total','Host system Filesystem total','Host File system total','sum by (instance) (node_filesystem_size_bytes{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem Total Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:27.634','2020-03-23 04:08:27.634'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_used','Host Swap Memory Used','Host Swap Used','node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Used Swap Memory Size:{{humanize $value}}KiB|{threshold}KiB.','2020-03-23 04:08:26.169','2020-03-23 04:08:26.169'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes_device','Node Disk Read Bytes per Device (KiB)','The total number 
of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_bytes','Node Disk Read Bytes (KiB)','The total number of bytes read successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_disk_read_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Read Size:{{humanize $value}}KiB|{threshold}KiB.','2019-06-04 18:11:00.000','2019-06-04 18:11:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_rollback','Number of Transactions Rolled Back','Number of transactions in this database that have been rolled back','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_xact_rollback[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Rollback Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_xact_commit','Number of Transactions Committed','Number of transactions in this database that have been committed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_xact_commit[1m]))','Transaction','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Commit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_ops_total','Number of Rows Operated','The number of rows operated in InnoDB tables','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, operation) (rate(mysql_global_status_innodb_row_ops_total[1m]))','Row','MySQL','operation',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Rows Operated Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_immediate','Number of Table Lock Immediate','The number of times that a request for a table lock could be granted immediately','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_immediate[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Immediate Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_count','Local range scan count (count/s)','Local range scan count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) 
(rate(cassandra_keyspace_rangelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_table_locks_waited','Number of Table Lock Waited','The number of times that a request for a table lock could not be granted immediately and a wait was needed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_table_locks_waited[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Table Lock Waited Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_time','Time Spent Reading Data File Blocks (ms)','Time spent reading data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blk_read_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_write_time','Time Spent Writing Data File Blocks (ms)','Time spent writing data file blocks by backends in this database, in milliseconds','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) 
(rate(pg_stat_database_blk_write_time[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Write Time:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_read','Number of Disk Blocks Read','Number of disk blocks read in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_read[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Read Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blks_hit','Number of Block Cache Hit','Number of times disk blocks were found already in the buffer cache','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_blks_hit[1m]))','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Block Hit Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_activity_count','Number of Client Connections','number of connections in this state','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname, state) (pg_stat_activity_count{{filter}})','Connection','PostgreSQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Connection Counts:{{humanize 
$value}}|{threshold}.','2019-08-27 15:49:21.000','2019-11-18 04:16:33.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_fetched','Number of Rows Fetched','Number of rows fetched by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_fetched[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Fetched Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_inserted','Number of Rows Inserted','Number of rows inserted by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_inserted[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Inserted Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_range_latency','Local range scan latency (ms)','Local range scan latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_rangelatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Range Scan Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_size','Size used by commit log segments (KiB/s)','Current size, in bytes, used by all the commit log segments / 1024','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_totalcommitlogsize[1m]){{filter}}) / 1024','Log','Cassandra',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Volume:{{humanize $value}}KiB/s|{threshold}KiB/s.','2019-10-02 10:17:01.000','2019-11-05 08:07:03.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog_messages','Number of commit log messages written (count/s)','Total number of commit log messages written','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(cassandra_commitlog_completedtasks[1m]))','Log','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Commit Log Message per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_count','Number of client requests (count/s)','Number of client requests by request type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_latency_seconds_count{{filter}}[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Client Request per second:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 11:04:25.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_active','Node Memory Active (GiB)','Memory information field Active_bytes in GiB','node_memory_Active_bytes{xm_entity_type=''Node'', {filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Active Memory:{{humanize $value}}GiB|{threshold}GiB.','2020-06-04 11:11:11.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_returned','Number of Rows Returned','Number of rows returned by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_tup_returned[1m]))','Row','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Returned Row Counts:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_write_count','Local write count (count/s)','Local write count for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (rate(cassandra_keyspace_writelatency_seconds_count[1m]))','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Write Count:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_cluster','Container Memory Request/Limits vs Used by Cluster','Container memory sum by cluster','sum by (xm_clst_id, 
data_type)( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity", "" , "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "", "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "", "") or +label_replace(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}, "data_type", "used", "" , ""))','Memory','Cluster',NULL,true,false,'Container memory sum by cluster','2020-07-22 21:23:15.000','2020-07-22 21:23:15.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_capacity','node_memory_capacity (Gib)','node memory capacity in GiB','imxc_kubernetes_node_resource_capacity_memory{{filter}} / 1024 / 1024 / 1024','Memory','Node',NULL,false,false,'None','2019-08-23 08:46:58.000','2019-08-23 08:46:58.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_request_cores','cluster_cpu_request_cores','cluster_cpu_request_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_request_cores','node_cpu_request_cores','node_cpu_request_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_cpu_limit_cores','cluster_cpu_limit_cores','cluster_cpu_limit_cores','sum by (xm_clst_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Cluster',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_limit_cores','node_cpu_limit_cores','node_cpu_limit_cores','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Node',NULL,false,false,'None','2019-08-23 08:40:36.000','2019-08-23 08:40:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_unavailables_count','Number of unavailable exceptions encountered','Number of unavailable exceptions encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_unavailables_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Unavailable Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_up','Cassandra Up Count','Whether the last scrape of metrics from Cassandra was able to connect to the server','count by (xm_clst_id, xm_namespace, xm_node_id, instance) (cassandra_bufferpool_size{{filter}})','Instance','Cassandra',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Instances:{{humanize 
$value}}|{threshold}.','2019-10-02 10:17:01.000','2019-11-05 17:01:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_up','MongoDB Up Count','The number of seconds that the current MongoDB process has been active','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mongodb_instance_uptime_seconds[1m]))','Instance','MongoDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Up Count Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_current_queue','Number of Operations Waiting','The number of operations that are currently queued and waiting for the read or write lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_current_queue)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Waiting Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_global_lock_client','Number of Active Client','The number of the active client connections performing read or write operations','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_global_lock_client)','Lock','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Active Client Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_metrics_document_total','Number of Documents Processed','The total number of documents processed','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_metrics_document_total[1m]))','Row','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Documents Processed Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_totaldiskspaceused','Total disk space used (GiB)','Total disk space used belonging to this keyspace / 1073741824','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_totaldiskspaceused {{filter}}) / 1073741824','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Disk Space:{{humanize $value}}GiB|{threshold}GiB.','2019-10-02 10:17:01.000','2019-11-07 01:14:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_keyspace_read_latency','Local read latency (ms)','Local read latency seconds for this keyspace','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, keyspace) (cassandra_keyspace_readlatency_seconds{quantile=''0.99''}) * 1000','Disk','Cassandra','keyspace',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Read Latency:{{humanize $value}}ms|{threshold}ms.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_totalblockedtasks','Number of tasks that were blocked (count/s)','Number of tasks that were blocked due to queue saturation in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_totalblockedtasks_count[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Blocked Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-10-01 16:45:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_threadpools_completedtasks','Number of tasks completed (count/s)','Number of tasks completed in a second','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, path) (rate(cassandra_threadpools_completedtasks{{filter}}[1m]))','Task','Cassandra','path',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Pending Task per second:{{humanize $value}}|{threshold}.','2019-10-01 16:45:21.000','2019-11-05 08:08:57.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_memory','Amount of Memory, in MebiByte','The amount of memory, in mebibyte (MiB), currently used by the database process','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (mongodb_memory)','Memory','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Memory:{{humanize $value}}MiB|{threshold}MiB.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_resource_utilization','Resource Usage','Gauge metric with resource utilization','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) (oracledb_resource_current_utilization)','Resource','OracleDB','resource_name',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Resource Usage:{{humanize $value}}%|{threshold}%.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_timeouts_count','Number of timeouts encountered','Number of timeouts encountered','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, clientrequest) (rate(cassandra_clientrequest_timeouts_count[1m]))','Request','Cassandra','clientrequest',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Timeout Request:{{humanize $value}}|{threshold}.','2019-10-02 10:17:01.000','2019-10-02 10:17:01.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_network_bytes_total','Amount of Network Traffic','The number of bytes that reflects the amount of network traffic','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (rate(mongodb_network_bytes_total[1m]))','Network','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Amount of Network Traffic Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_op_counters_total','Number 
of Operations','The total number of operations since the mongod instance last started','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, type) (rate(mongodb_op_counters_total[1m]))','Request','MongoDB','type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Operations Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_row_lock_waits','Number of Waits for Row Locks','The number of times operations on InnoDB tables had to wait for a row lock','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_row_lock_waits[1m]))','Lock','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Waits for Row Locks Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_execute_count','Execute Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_execute_count[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Execute Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_commits','User Commits','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) 
(rate(oracledb_activity_user_commits[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_parse_count','Parse Count','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_parse_count_total[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Parse Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_activity_user_rollbacks','User Rollbacks','Generic counter metric from v$sysstat view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_activity_user_rollbacks[1m]))','Request','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle User Rollback:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_writes','Pod Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Write Bytes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 
05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage','Pod Memory Usage (%)','Pod Memory Usage Compared to Limit','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / ((container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024)','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Utilization:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_usage_bytes','Pod Memory Used (GiB)','Current memory usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_cache_hit_ratio','Buffer Cache Hit Ratio (%)','Number of Block Cache Hit / (Number of Block Cache Hit & Blocks Reads) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (increase(pg_stat_database_blks_hit[1h]) / (increase(pg_stat_database_blks_read[1h]) + increase(pg_stat_database_blks_hit[1h])) * 100)','Block','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL
Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-08-27 15:49:21.000','2019-12-13 01:33:39.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_other','Wait-Time - Other','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_other[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Other:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_configuration','Wait-Time - Configuration','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_configuration[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Configuration:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_commit','Wait-Time - Commit','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_commit[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Commit:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_scheduler','Wait-Time - Scheduler','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_scheduler[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Scheduler:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_system_io','Wait-Time - System I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_system_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - System I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_user_io','Wait-Time - User I/O','Generic counter metric from v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_user_io[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - User I/O:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_network','Wait-Time - Network','Generic counter metric from 
v$waitclassmetric view in Oracle','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(oracledb_wait_time_network[1m]))','Wait','OracleDB',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Wait-Time - Network:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-01-28 13:03:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_blocked_clients','Blocked Clients','Number of clients pending on a blocking call (BLPOP, BRPOP, BRPOPLPUSH)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_blocked_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Blocked Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connected_clients','Connected Clients','Number of client connections (excluding connections from replicas)','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_connected_clients)','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Connected Clients:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_connections_received','Received Connections','Total number of connections accepted by the server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_connections_received_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Received Connections:{{humanize 
$value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_rejected_connections','Rejected Connections','Number of connections rejected because of maxclients limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_rejected_connections_total[1m]))','Connection','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Rejected Connections:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_up','Redis Up Count','Whether the Redis server is up','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_up)','Instance','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Up Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_total','Call Count / Command','Total number of calls per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_total[1m]))','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Call Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_processed','Processed Commands','Total number of commands processed by the server','sum by (xm_clst_id, 
xm_namespace, xm_node_id, instance) (rate(redis_commands_processed_total[1m]))','Request','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Processed Commands:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_key_hit_raito','Redis key hit ratio','redis key hit ratio','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_keyspace_hits_total [1m]), "data_type", "hits", "" , "") or +label_replace(rate(redis_keyspace_misses_total [1m]), "data_type", "misses", "" , "") )','Keyspace','Redis','data_type',true,false,'redis key hit ratio','2020-01-29 02:28:03.000','2020-02-13 00:46:27.568'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_net_byte_total','Network byte','Network byte','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_net_input_bytes_total [1m]), "data_type", "input", "", "") or +label_replace(rate(redis_net_output_bytes_total [1m]), "data_type", "output", "", ""))','Network','PostgreSQL','data_type',true,false,'Network byte','2020-01-30 07:22:12.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_cache','Pod Memory Cache (GiB)','Number of bytes of page cache memory / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_cache{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Cache Memory:{{humanize 
$value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_swap','Pod Memory Swap (GiB)','Pod swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) (container_memory_swap{xm_entity_type=''Container'',{filter}}) / 1073741824','Memory','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_wait_time_total','Oracledb wait time total','oracledb wait time total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_wait_time_scheduler[1m]), "data_type", "scheduler", "", "") or +label_replace(rate(oracledb_wait_time_commit[1m]), "data_type", "commit", "", "") or +label_replace(rate(oracledb_wait_time_network[1m]), "data_type", "network", "", "") or +label_replace(rate(oracledb_wait_time_concurrency[1m]), "data_type", "concurrency", "", "") or +label_replace(rate(oracledb_wait_time_configuration[1m]), "data_type", "configuration", "", "") or +label_replace(rate(oracledb_wait_time_user_io[1m]), "data_type", "user_io", "", "") or +label_replace(rate(oracledb_wait_time_system_io[1m]), "data_type", "system_io", "", "") or +label_replace(rate(oracledb_wait_time_other[1m]), "data_type", "other", "", ""))','Wait','OracleDB','data_type',true,false,'oracledb wait time total','2020-01-29 11:03:20.000','2020-02-13 01:08:01.629'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('oracledb_activity_count','Oracledb activity count','oracledb activity count','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_execute_count [1m]), "data_type", "excutecount", "", "") or +label_replace(rate(oracledb_activity_parse_count_total[1m]), "data_type", "parse_count", "", "") )','Request','OracleDB','data_type',true,false,'oracledb activity count','2020-01-29 10:40:58.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_transaction','Oracledb transaction','oracledb transaction','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(oracledb_activity_user_rollbacks[1m]), "data_type", "rollbacks", "", "") or +label_replace(rate(oracledb_activity_user_commits[1m]), "data_type", "commits", "", ""))','Request','OracleDB','data_type',true,false,'oracledb transaction','2020-01-29 11:20:47.000','2020-02-13 01:26:28.558'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_cpu_usage','Redis cpu usage','redis cpu usage','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_used_cpu_sys [1m]), "data_type", "system", "", "") or +label_replace(rate(redis_used_cpu_user [1m]), "data_type", "user", "", "") )','CPU','Redis','data_type',true,false,'redis cpu usage','2020-01-29 01:56:58.000','2020-02-12 04:47:21.228'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_total_load','host total load','host total load','sum by (instance, data_type) ( +label_replace(node_load1 {{filter}}, "data_type", "load 1", "", "") or +label_replace(node_load5 
{{filter}}, "data_type", "load 5", "", "") or +label_replace(node_load15 {{filter}}, "data_type", "load15", "", "") )','CPU','Host',NULL,false,false,'host total load','2020-04-01 08:10:26.588','2020-04-03 01:23:47.665'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys_children','System CPU Used Background','System CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used Backedground:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_hits','Keyspace Hits','Number of successful lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_hits_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Hits:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_keyspace_misses','Keyspace Misses','Number of failed lookup of keys in the main dictionary','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_keyspace_misses_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Keyspace Misses:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys','DB Keys Count','Total number of keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB Keys Count:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_expired_keys','Expired Keys','Total number of key expiration events','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_expired_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Expired Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_evicted_keys','Evicted Keys','Number of evicted keys due to maxmemory limit','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_evicted_keys_total[1m]))','Keyspace','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Evicted Keys:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_db_keys_expiring','DB Keys Count Expiring','Total number of expiring keys by DB','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, db) (redis_db_keys_expiring)','Keyspace','Redis','db',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis DB 
Keys Count Expiring:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_commands_duration_seconds','Duration Seconds / Command','Total duration seconds per command','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, cmd) (rate(redis_commands_duration_seconds_total[1m]) * 1000)','Request','Redis','cmd',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Duration Seconds:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-29 01:42:36.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_total','Redis memory total','redis memory total','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(redis_allocator_active_bytes / 1048576, "data_type", "active", "" , "") or +label_replace(redis_memory_used_bytes / 1048576, "data_type", "used", "" , "") or +label_replace(redis_allocator_allocated_bytes / 1048576, "data_type", "allocated", "" , "") or +label_replace(redis_allocator_resident_bytes / 1048576, "data_type", "resident", "" , "") )','Memory','Redis','data_type',true,false,'redis memory total','2020-01-29 02:08:28.000','2020-02-13 00:45:28.475'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('count_by_connection_type','Count by connection type','count by connection type','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(redis_connections_received_total [1m]), "data_type", "received connections", "", "") or +label_replace(rate(redis_rejected_connections_total [1m]), "data_type", "rejected 
connections", "", "") or +label_replace(redis_connected_clients, "data_type", "connected clients", "", "") or +label_replace(redis_blocked_clients, "data_type", "blocked clients", "", "") )','Connection','Redis','data_type',true,false,'count by connection type','2020-01-29 00:49:09.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_tup_count','Number of row by stat','Number of row by stat','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_tup_deleted[1m]), "data_type", "deleted", "", "") or +label_replace(rate(pg_stat_database_tup_updated[1m]), "data_type", "updated", "", "") or +label_replace(rate(pg_stat_database_tup_inserted[1m]), "data_type", "inserted", "", "") or +label_replace(rate(pg_stat_database_tup_returned[1m]), "data_type", "returned", "", "") or +label_replace(rate(pg_stat_database_tup_fetched[1m]), "data_type", "fetched", "", "") )','Row','PostgreSQL','data_type',true,true,'Number of row by stat','2019-10-28 07:29:26.000','2020-02-13 01:04:18.528'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_blk_read_write_time','Read/Write spent time by file blocks','Read/Write spent time by file blocks','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, data_type) +(label_replace(rate(pg_stat_database_blk_read_time [1m]), "data_type", "read", "", "") or +label_replace(rate(pg_stat_database_blk_write_time [1m]), "data_type", "write", "", ""))','Block','PostgreSQL','data_type',true,false,'Read/Write spent time by file blocks','2019-10-28 10:56:48.000','2020-02-13 01:06:46.680'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_allocator_resident_size','Resident Memory (MiB)','The total amount of resident memory that the Redis allocator has','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_allocator_resident_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Resident Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_memory_used_size','Used Memory (MiB)','Total number of bytes allocated by Redis using its allocator','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (redis_memory_used_bytes) / 1048576','Memory','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis Used Memory:{{humanize $value}}MiB|{threshold}MiB.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_clientrequest_anormal_count','Number of anormal request','Number of anormal request ','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, anormal_type) +(label_replace(rate(cassandra_clientrequest_unavailables_count[1m]), "anormal_type", "unavailables", "", "") or +label_replace(rate(cassandra_clientrequest_timeouts_count[1m]), "anormal_type", "timeouts", "", "") or +label_replace(rate(cassandra_clientrequest_failures_count[1m]), "anormal_type", "failures", "", ""))','Request','Cassandra','anormal_type',true,false,'Number of anormal request ','2019-10-28 02:09:45.000','2020-02-13 01:16:24.862'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_commitlog','Commitlog count and size','Commitlog count and size','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) +(label_replace(rate(cassandra_commitlog_completedtasks {{filter}}[1m]), "data_type", "log_count", "", "") or +label_replace(rate(cassandra_commitlog_totalcommitlogsize {{filter}}[1m]) / 1048576, "data_type", "log_size", "", ""))','Log','Cassandra','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Cassandra Cache Hit Rate:{{humanize $value}}|{threshold}.','2019-10-24 10:44:47.000','2020-02-13 01:16:24.864'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_threads_total','Number of Threads','Number of Threads','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_threads_running, "data_type", "active", "", "") or +label_replace(mysql_global_status_threads_connected, "data_type", "connected", "", "") or +label_replace(rate(mysql_global_status_connections [1m]), "data_type", "connection attempts[1m]", "", "") )','Thread','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Threads Running Counts:{{humanize $value}}|{threshold}.','2019-12-05 06:04:21.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cassandra_read_write_count','Local read write count','Local read write count','sum by(xm_clst_id, xm_namespace, xm_node_id, instance, type) +(label_replace( rate(cassandra_keyspace_readlatency_seconds_count [1m]), "type", "read", "", "") or +label_replace( 
rate(cassandra_keyspace_writelatency_seconds_count [1m]), "type", "write", "", ""))','Disk','Cassandra','type',true,true,'Local read write count','2019-10-24 05:18:50.000','2020-02-13 01:23:46.608'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_lock_total','Oracledb lock total','oracledb lock total','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, resource_name) +(oracledb_resource_current_utilization{resource_name =~''.+_locks''})','Resource','OracleDB','resource_name',true,false,'oracledb lock total','2020-01-29 11:17:01.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_per_sec_by_api','Service HTTP Requests Count by API (per Second)','the number of HTTP requests counts per second by API','(sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value)','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_core_count','Host CPU Core Count','Host_cpu_capacity_cores','count without(cpu, mode) (node_cpu_seconds_total{{filter}})','CPU','Host',NULL,true,false,'None','2020-03-23 
04:08:05.290','2020-03-23 04:08:05.290'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load5','Host CPU Load 5m Average','Host CPU 5m load average','node_load5{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 5m Load Average:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:11.655','2020-03-23 04:08:11.655'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_cluster','Pod Phase Count by Cluster','pod phase count by cluster','count by(xm_clst_id, pod_state) (sum by (xm_clst_id, xm_pod_id, pod_state)(rate(imxc_kubernetes_container_resource_limit_cpu{{filter}}[1m])))','Cluster','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_network_io_byte','host network io byte','host network io byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]) or rate(node_network_receive_bytes_total{{filter}}[5m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]) or rate(node_network_transmit_bytes_total{{filter}}[5m]), "data_type", "Transmit", "", "") )','Network','Host',NULL,false,false,'host network io byte','2020-03-24 05:48:31.359','2020-03-24 05:48:31.359'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_contextswitch_and_filedescriptor','host contextswitch and filedescriptor','host 
contextswitch and filedescriptor','sum by (data_type, instance) ( +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "Context switch", "", "") or +label_replace(node_filefd_allocated {{filter}}, "data_type", "File descriptor", "", "") )','OS','Host',NULL,false,false,'host contextswitch and filedescriptor','2020-03-24 09:05:51.828','2020-03-24 09:08:06.867'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_usage','Host Swap Memory Usage (%)','Host Swap Memory Usage','(node_memory_SwapTotal_bytes{{filter}} - node_memory_SwapFree_bytes{{filter}}) / node_memory_SwapTotal_bytes{{filter}} * 100 +','Memory','Host',NULL,true,false,'None','2020-03-26 06:39:21.333','2020-03-26 06:39:21.333'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_boot_time','Host Boot time','Host Boot time','node_boot_time_seconds{{filter}}','CPU','Host',NULL,true,false,'None','2020-03-26 08:03:46.189','2020-03-26 08:03:46.189'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_read_latency','Host read Disk latency','Host disk read latency','sum by (instance) (rate(node_disk_reads_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_read_time_seconds_total{{filter}}[1m])/rate(node_disk_reads_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Read Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:34.001','2020-03-23 04:08:34.001'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('host_disk_write_latency','Host write Disk latency','Host disk write latency','sum by (instance) (rate(node_disk_writes_completed_total{{filter}}[1m])) == 0 or sum by (instance) (rate(node_disk_write_time_seconds_total{{filter}}[1m])/rate(node_disk_writes_completed_total{{filter}}[1m]) >= 0 )','Disk','Host',NULL,true,false,'Host:{{$labels.instance}} Disk Write Latency:{{humanize $value}}|{threshold}.','2020-03-23 04:08:35.823','2020-03-23 04:08:35.823'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_usage','Host Memory Usage (%)','Host Memory Usage ','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}})) / node_memory_MemTotal_bytes{{filter}} * 100','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Usage:{{humanize $value}}%|{threshold}%.','2020-03-26 06:36:47.931','2020-03-26 06:36:47.931'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_total','Host Memory Total (GiB)','Memory information field MemTotal_bytes','node_memory_MemTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Memory Size:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:16.897','2020-03-23 04:08:16.897'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_bytes_received_sent','Bytes Received & Sent in MySQL','Bytes Received & Sent in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( 
+label_replace(rate(mysql_global_status_bytes_received [1m]), "data_type", "received", "", "") or +label_replace(rate(mysql_global_status_bytes_sent [1m]), "data_type", "sent", "", ""))','Network','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Container:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}|{threshold}.','2019-12-05 07:58:11.000','2020-02-13 01:12:05.436'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_95th','Service HTTP 95% Elapsed Time (ms)','the maximum time taken to servce the 95% of HTTP requests','histogram_quantile(0.95, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 95th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_99th','Service HTTP 99% Elapsed Time (ms)','the maximum time taken to servce the 99% of HTTP requests','histogram_quantile(0.99, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 99th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_error_rate','Service Pod HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Pod Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-11-07 07:52:24.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_90th','Service HTTP 90% Elapsed Time (ms)','the maximum time taken to servce the 90% of HTTP requests','histogram_quantile(0.90, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) 
(rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 90th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_fs_total_by_mountpoint','host filesystem size by mountpoint','host filesystem size by mountpoint','sum by(instance, mountpoint, fstype, data_type) ( +label_replace(node_filesystem_size_bytes {fstype!="rootfs",{filter}}, "data_type", "totalsize", "", "") or +label_replace(node_filesystem_avail_bytes {fstype!="rootfs",{filter}}, "data_type", "availablesize", "", ""))','Filesystem','Host',NULL,false,false,'host filesystem size by mountpoint','2020-03-30 04:01:45.322','2020-03-30 05:16:32.252'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_timeline_count','Namespace timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id, xm_namespace, level)','Timeline','Namespace',NULL,false,false,'None','2020-04-08 06:21:21.392','2020-04-08 06:21:21.392'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_timeline_count','Cluster timeline count','alert, event count','sum (floor(increase(imxc_kubernetes_event_counts{{filter}}[10m])) or floor(increase(imxc_alerts_received_count_total{status="firing", {filter}}[10m])))by (xm_clst_id,level)','Timeline','Cluster',NULL,false,false,'None','2020-04-08 06:19:32.792','2020-04-28 
08:07:47.786'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_transmit','Cluster Network Transmit','Cluster Network Transmit','sum by (xm_clst_id) (rate(node_network_transmit_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Transmit','2020-04-28 08:10:21.070','2020-04-28 08:29:18.491'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('cluster_network_receive','Cluster Network Receive','Cluster Network Receive','sum by (xm_clst_id) (rate(node_network_receive_bytes_total{{filter}} [1m]))','Network','Cluster',NULL,true,true,'Cluster Network Receive','2020-04-28 08:07:26.294','2020-04-28 08:29:18.486'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('namespace_pod_running_count','Namespace Pod Running Count','Running pod count by namespace','count by (xm_clst_id, xm_namespace) (sum by (xm_clst_id, xm_node_id, xm_namespace, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{pod_state="Running", {filter}}))','Pod','Namespace',NULL,false,false,'None','2020-05-21 01:18:06.016','2020-05-21 01:18:06.016'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_request','Pod CPU Request','Pod CPU Request','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_io_byte','Node Network IO byte','Node Network IO byte','sum by (data_type, instance) ( +label_replace(rate(node_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or +label_replace(rate(node_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", "") )','Network','Node',NULL,false,false,'Node Network IO byte','2020-05-21 07:32:03.535','2020-05-21 07:32:03.535'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_request','pod_memory_request (Gib)','Total container memory request in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_request_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_node','Container memory sum by node','Container memory sum by node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_memory{{filter}}, "data_type", "capacity" , "", "") or +label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or +label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or +label_replace(container_memory_working_set_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Node',NULL,false,false,'Container memory sum by node','2020-05-28 09:36:44.000','2020-06-09 01:38:10.694'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_context_switches','Node Context Switches','Node Context Switches','rate(node_context_switches_total {{filter}}[1m])','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:05.521'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_contextswitch_and_filedescriptor','Node contextswitch and filedescriptor','Node contextswitch and filedescriptor','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(node_filefd_allocated {{filter}}, "data_type", "file descriptor" , "", "") or +label_replace(rate(node_context_switches_total {{filter}}[1m]), "data_type", "context switches", "" , ""))','File','Node',NULL,false,false,'Node contextswitch and filedescriptor','2020-05-28 12:38:21.587','2020-05-28 12:38:21.587'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_read_write_byte','Node disk read and write bytes','Node disk read and write bytes','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(rate(node_disk_read_bytes_total{{filter}}[1m]), "data_type", "Read" , "", "") or +label_replace(rate(node_disk_written_bytes_total{{filter}}[1m]), "data_type", "Write", "" , "") +)','Disk','Node',NULL,false,false,'Node disk read and write bytes','2020-05-28 13:02:44.729','2020-05-28 13:04:35.126'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_swap_total','Host Swap Memory Total','Host Swap Total','node_memory_SwapTotal_bytes{{filter}}','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Total Swap Memory Size:{{humanize 
$value}}GiB|{threshold}GiB.','2020-03-23 04:08:23.130','2020-03-23 04:08:23.130'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_iowait','Host CPU iowait','Host CPU iowait','avg by (instance) (rate(node_cpu_seconds_total{mode=''iowait'',{filter}}[1m])) * 100','CPU','Host',NULL,false,false,'Host:{{$labels.instance}} CPU IO wait:{{humanize $value}}|{threshold}.','2020-03-26 08:03:51.307','2020-03-26 08:03:51.307'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_filefd_allocated','Host statistics Filesystem allocated.','Host File descriptor statistics: allocated.','sum by (instance) (node_filefd_allocated{{filter}})','Filesystem','Host',NULL,true,false,'Host:{{$labels.instance}} Filesystem allocated:{{humanize $value}}|{threshold}.','2020-03-23 04:08:31.970','2020-03-23 04:08:31.970'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg','Service HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests','sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) == 0 or +sum (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) +/ sum (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace)','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Requests Time Avg:{{humanize 
$value}}ms|{threshold}ms.','2019-10-15 09:37:44.000','2020-03-09 06:42:14.172'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate_by_api','Service HTTP Requests Error Rate by API','the number of HTTP error counts by API / the number of HTTP requests counts by API','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) ==0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.498'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_avg_by_api','Service HTTP Average Elapsed Time by API (ms)','the average time taken to serve the HTTP requests by API for a service','sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,api) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,false,false,'not for alarm','2020-02-18 12:12:12.000','2020-06-03 06:52:05.500'); 
+INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_used','Node CPU Used (Cores)','Node CPU Used (Cores)','(100 - (avg by (xm_clst_id, xm_node_id) (clamp_max(rate(node_cpu_seconds_total{name="node-exporter", mode="idle", xm_entity_type="Node", {filter}}[1m]),1.0)) * 100)) * sum by(xm_clst_id, xm_node_id)(imxc_kubernetes_node_resource_capacity_cpu{{filter}}) / 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:35.939'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_cpu_iowait','Node CPU I/O Wait','Node CPU I/O Wait','avg by (xm_clst_id, xm_node_id, xm_entity_type) (rate(node_cpu_seconds_total{name="node-exporter", mode="iowait", xm_entity_type="Node" , {filter}}[1m])) * 100','CPU','Node',NULL,false,false,'None','2020-05-21 01:18:06.000','2020-05-29 09:38:20.633'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_node','Container cpu sum by Node','Container cpu sum by Node','sum by(xm_clst_id, xm_node_id, data_type) ( +label_replace(imxc_kubernetes_node_resource_capacity_cpu{{filter}} * 0.001, "data_type", "capacity" , "", "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001), "data_type", "limit", "" , "") or +label_replace(sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001), "data_type", "request", "" , "") or +label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Node',NULL,false,false,'Container cpu sum by Node','2020-05-28 08:06:35.736','2020-06-09 01:46:12.446'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops_per_device','Node Disk IOPs per device','Node Disk I/O Operations Per Second (per device)','sum by (xm_clst_id, xm_node_id, device) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node','device',false,false,'None','2020-06-10 05:56:05.311','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_iops','Node Disk IOPs','Node Disk I/O Operations Per Second','sum by (xm_clst_id, xm_node_id) (rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m]))','Disk','Node',NULL,false,false,'None','2020-06-10 05:54:01.309','2020-06-10 07:24:15.462'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_disk_iops','Host Disk IOPs','Host Disk IOPs','sum by (instance) ((rate(node_disk_reads_completed_total{{filter}}[1m]) + rate(node_disk_writes_completed_total{{filter}}[1m])) or (rate(node_disk_reads_completed_total{{filter}}[5m]) + rate(node_disk_writes_completed_total{{filter}}[5m])))','Disk','Node',NULL,false,false,'Host Disk IOPs','2020-06-10 07:26:28.895','2020-06-10 07:26:28.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_cpu_limit','Pod CPU Limit','Pod CPU Limit','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_memory_limit','pod_memory_limit (Gib)','Total container memory limit in GiB for the given pod','sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_memory{{filter}}) / 1073741824','Memory','Pod',NULL,false,false,'None','2020-05-21 11:50:52.717','2020-05-21 11:50:52.717'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage_bytes','Container Memory Used (GiB)','Current memory usage in GiB, this includes all memory regardless of when it was accessed','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}} / 1024 / 1024 / 1024)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_memory_used','Node Memory Used (GIB)','Node Memory Used (GIB)','((node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - (node_memory_MemFree_bytes{xm_entity_type="Node", {filter}} + node_memory_Cached_bytes{xm_entity_type="Node", {filter}} + node_memory_Buffers_bytes{xm_entity_type="Node", {filter}})) >= 0 or node_memory_MemTotal_bytes{xm_entity_type="Node", {filter}} - node_memory_MemFree_bytes{xm_entity_type="Node", {filter}}) / 1024 / 1024 / 1024','Memory','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Memory Used:{{humanize $value}}GiB|{threshold}GiB.','2020-05-21 01:18:06.000','2020-06-04 11:11:11.000'); +INSERT INTO 
public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user','User CPU Used','User CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user[1m]))','CPU','Redis',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-05-29 09:37:22.273'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_container','Container cpu sum by container','container cpu sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_request_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{xm_cont_name!=''POD'',{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{xm_cont_name!=''POD'',{filter}}[1m]), "data_type", "used", "" , ""))','CPU','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_sum_by_pods','Container cpu sum by pod','Container cpu sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_request_cpu{{filter}} * 0.001, "data_type", "request" , "", "") or label_replace(imxc_kubernetes_container_resource_limit_cpu{{filter}} * 0.001, "data_type", "limit" , "", "") or label_replace(rate(container_cpu_usage_seconds_total{{filter}}[1m]), "data_type", "used", "" , 
""))','CPU','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_pods','Container memory sum by pod','Container memory sum by pod','sum by(xm_clst_id, data_type, xm_pod_id) (label_replace(imxc_kubernetes_container_resource_limit_memory{{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{{filter}}, "data_type", "used", "" , ""))','Memory','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_sum_by_container','Container memory sum by container','Container memory sum by container','sum by(xm_clst_id, data_type, xm_pod_id, xm_cont_name) (label_replace(imxc_kubernetes_container_resource_limit_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "limit", "" , "") or label_replace(imxc_kubernetes_container_resource_request_memory{xm_cont_name!=''POD'',{filter}}, "data_type", "request", "" , "") or label_replace(container_memory_usage_bytes{xm_cont_name!=''POD'',{filter}}, "data_type", "used", "" , ""))','Memory','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_disk_read_write_byte','Container disk read and write bytes','Container disk read and write bytes','sum by(xm_clst_id, xm_pod_id, xm_cont_name, data_type) 
(label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read", "" , ""))','Disk','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_disk_read_write_byte','Pod disk read and write bytes','Pod disk read and write bytes','sum by(xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_fs_writes_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Write" , "", "") or label_replace(rate(container_fs_reads_bytes_total{xm_entity_type="Container",{filter}}[1m]), "data_type", "Read", "" , ""))','Disk','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_io_byte','Container Network IO byte','Container Network IO byte','sum by (xm_clst_id, xm_pod_id, xm_cont_name, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", "", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Container',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_network_io_byte','Pod Network IO byte','Pod Network IO byte','sum by (xm_clst_id, xm_pod_id, data_type) (label_replace(rate(container_network_receive_bytes_total{{filter}}[1m]), "data_type", "Receive", 
"", "") or label_replace(rate(container_network_transmit_bytes_total{{filter}}[1m]), "data_type", "Transmit", "", ""))','Network','Pod',NULL,false,false,'None','2020-05-21 06:50:49.546','2020-05-21 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_load1','Node CPU Load 1m Average','Node CPU 1m load average','node_load1{xm_entity_type=''Node'',{filter}}','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} CPU 1m Load Avg:{{humanize $value}}|{threshold}.','2019-05-15 08:22:49.000','2019-05-15 08:22:49.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_open_file_descriptor','Node File Descriptor','Node File Descriptor','sum by(xm_clst_id, xm_node_id)(node_filefd_allocated {{filter}})','Filesystem','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} File Descriptor:{{humanize $value}}|{threshold}.','2020-05-21 01:18:06.000','2020-05-29 09:37:51.101'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_sparselog_type_node_count','Node Type Sparselog Count','Node-type sparse log count by xm_clst_id, xm_node_id over last 1 min','sum by (xm_entity_type, xm_clst_id, xm_node_id) (round(increase(imxc_sparselog_count_total{xm_entity_type="Node",{filter}}[1m])))','SparseLog','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Sparselog Count:{{humanize $value}}|{threshold}.','2020-03-26 15:05:51.828','2020-03-26 15:05:51.828'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_cache','Container Memory Cache (GiB)','Number of bytes of page cache memory / 
1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_cache{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load15','Host CPU Load 15m Average','Host CPU 15m load average','node_load15{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 15m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:13.337','2020-03-23 04:08:13.337'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_bytes_device','Node Disk Write Bytes per Device (KiB)','The total number of bytes written successfully / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_written_bytes_total{xm_entity_type=''Node'', {filter}}[1m]) ) / 1024','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Write Size:{{humanize $value}}KiB|{threshold}KiB.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_write_latency','Node Disk Write Latency (ms)','Node Disk Write Latency','sum by (xm_clst_id,xm_node_id, xm_entity_type) (rate(node_disk_write_time_seconds_total{xm_entity_type=''Node'',{filter}}[1m])) * 1000','Disk','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Disk Write Latency:{{humanize 
$value}}ms|{threshold}ms.','2019-05-20 11:00:56.000','2019-05-31 17:47:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_disk_writes_count_device','Node Disk Writes Count per Device (IOPS)','Node Disk Writes Counts per Device','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_disk_writes_completed_total{xm_entity_type=''Node'', {filter}}[1m]) )','Disk','Node','device',true,false,'NODE:{{$labels.xm_node_id}} FS:{{$labels.mountpoint}} Disk Writes Count:{{humanize $value}}IOPS|{threshold}IOPS.','2019-08-23 11:26:07.000','2019-08-23 11:26:07.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_throttled_rate','Container CPU Throttled Rate','container throttled rate','sum by(xm_clst_id, xm_cont_id) (rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD", image!="",{filter}}[1m]))','Cluster','Container',NULL,false,false,'CLST:{{$labels.xm_clst_id}} CPU Throttled:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_total_count','Node Pod Total Count','Node Pod Total Count','count by (xm_clst_id, xm_node_id) (sum by (xm_clst_id, xm_node_id, xm_pod_id) (imxc_kubernetes_container_resource_limit_cpu{{filter}}))','Pod','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod Count:{{humanize $value}}|{threshold}.','2019-10-11 00:29:17.000','2019-11-26 01:29:10.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_service_http_requests_per_sec','Service HTTP Requests Count (per Second)','the number of HTTP requests counts per second','((sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))/ on (xm_clst_id, xm_namespace, xm_service_name ) group_left imxc_sampling_param_value) or (sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / on (xm_clst_id) group_left imxc_sampling_default_param_value))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Http Requests/Second:{{humanize $value}}|{threshold}.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_per_sec','Service Pod HTTP Requests Count (per Second)','the number of HTTP requests counts per second for pod','sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod Http Requests/Seconds:{{humanize $value}}|{threshold}.','2019-11-07 07:51:11.000','2020-03-09 06:34:19.353'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_max_usage_bytes','Container Memory Max Used (GiB)','Maximum memory usage recorded in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_max_usage_bytes{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 
1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_receive','Container Network Receive (KiB)','Network device statistic receive_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_receive_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:23:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_requests_time_50th','Service HTTP 50% Elapsed Time (ms)','the maximum time taken to serve the 50% of HTTP requests','histogram_quantile(0.50, sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name,le) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))) >=0 or sum by (xm_entity_type,xm_clst_id,xm_namespace,xm_service_name) (rate(imxc_service_request_milliseconds_bucket{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} 50th HTTP Requests Time:{{humanize $value}}ms|{threshold}ms.','2020-02-18 12:12:12.000','2020-02-18 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_errors_count','Service Error Count','service error 
count','sum by(xm_clst_id, xm_namespace, xm_service_name, statuscode ) (imxc_service_errors_count{statuscode!="200",{filter}}) OR on() vector(0)','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Error Count:{{humanize $value}}|{threshold}.','2020-08-21 16:45:00.000','2020-08-21 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_memory_used','Host Memory Used (GiB)','Memory information field MemUsed_bytes','((node_memory_MemTotal_bytes{{filter}} - (node_memory_MemFree_bytes{{filter}} + node_memory_Cached_bytes{{filter}} + node_memory_Buffers_bytes{{filter}} + node_memory_SReclaimable_bytes{{filter}})) >= 0 or (node_memory_MemTotal_bytes{{filter}} - node_memory_MemFree_bytes{{filter}}))','Memory','Host',NULL,true,false,'Host:{{$labels.instance}} Memory Utilization:{{humanize $value}}GiB|{threshold}GiB.','2020-03-23 04:08:21.399','2020-03-23 04:08:21.399'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_all_state','Workload Count All State','workload total count regardless of pod state','count by(xm_clst_id, controller_kind) (imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('workload_count_running_pod','Workload Count Running Pod','workload count of Running state pod','sum by(xm_clst_id,controller_kind ) 
(imxc_kubernetes_controller_ready{controller_kind=~"Deployment|DaemonSet|ReplicaSet|StatefulSet|StaticPod",{filter}})','Pod','Namespace',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Workload Total Count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_transmit_device','Node Network Transmit per Device(KiB)','Network device statistic transmit_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_transmit_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_network_receive_device','Node Network Receive per Device(KiB)','Network device statistic receive_bytes by device / 1024','sum by (xm_clst_id, xm_node_id, xm_entity_type, device, mountpoint) (rate(node_network_receive_bytes_total{xm_entity_type=''Node'',{filter}}[1m]) ) / 1024','Network','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} DEV:{{$labels.device}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.','2020-11-06 09:09:05.000','2020-11-06 09:09:05.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_pod_http_requests_time_avg','Service Pod HTTP Average Elapsed Time (ms)','the average time taken to serve the HTTP requests for pod','sum by 
(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_sum{xm_entity_type="Service",protocol="http",{filter}}[1m])) +/ sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace,xm_pod_id) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} IMXC Svc Pod http Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2019-11-07 07:51:46.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_system','Container CPU System (%)','Container CPU Usage (System)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU System:{{humanize $value}}%|{threshold}%.','2019-06-05 09:07:00.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_cpu_usage','Container CPU Usage (%)','Container CPU Usage','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m])) * 100','CPU','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} CPU Usage:{{humanize $value}}%|{threshold}%','2019-05-15 
01:02:23.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_phase_count_by_namespace','Pod Phase Count by Namespace','pod phase count by cluster, namespace','count by(xm_clst_id, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{{filter}})','Namespace','Pod',NULL,true,false,'CLST:{{$labels.xm_clst_id}} Pod phase count:{{humanize $value}}|{threshold}.','2020-08-19 16:45:00.000','2020-08-19 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_limit_bytes','Container Filesystem Limit Bytes (GiB)','Number of bytes that can be consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_limit_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_usage','Container Memory Usage (%)','Container memory usage compared to limit if limit is non-zero or 1GiB if limit is zero','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_usage_bytes{xm_entity_type=''Container'', xm_cont_name!=''POD'', {filter}} / (container_spec_memory_limit_bytes{xm_entity_type=''Container'',{filter}} > 0) * 100) or sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) 
(container_memory_usage_bytes{xm_entity_type=''Container'',{filter}} / 1024 / 1024 / 1024 * 100)','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Memory Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 14:27:36.000','2020-06-04 11:11:11.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_memory_swap','Container Memory Swap (GiB)','Container swap usage in bytes / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_memory_swap{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}) / 1073741824','Memory','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 14:27:36.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_network_transmit','Container Network Transmit (KiB)','Network device statistic transmit_bytes / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_network_transmit_bytes_total{xm_entity_type=''Container'',{filter}}[1m]) ) / 1024','Network','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.','2019-05-21 08:26:35.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('controller_pod_count','Controller Pod Count','Controller Pod Count','sum (imxc_kubernetes_controller_counts{{filter}}) by (xm_clst_id, xm_namespace, xm_entity_name, 
xm_entity_type)','Pod','Controller',NULL,false,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Controller Pod Counts:{{humanize $value}}|{threshold}.','2019-10-10 06:39:09.000','2019-10-10 06:39:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_load1','Host CPU Load 1m Average','Host CPU 1m load average','node_load1{{filter}}','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU 1m Load Average:{{humanize $value}}%|{threshold}%','2020-03-23 04:08:09.946','2020-03-23 04:08:09.946'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('host_cpu_usage','Host CPU Usage (%)','Host CPU Usage','100 - (avg by (instance)(clamp_max(rate(node_cpu_seconds_total{mode=''idle'',{filter}}[1m]),1.0)) * 100)','CPU','Host',NULL,true,false,'Host:{{$labels.instance}} CPU Utilization:{{humanize $value}}%|{threshold}%.','2020-03-23 04:08:07.606','2020-03-23 04:08:07.606'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('aws_ec2_cpuutilization','The percentage of allocated EC2 compute','The percentage of allocated EC2 compute units that are currently in use on the instance.','sum by (xm_clst_id, instance_id, instance) (aws_ec2_cpuutilization_average{{filter}})','CPU','AWS/EC2',NULL,true,true,'CLST:{{$labels.xm_clst_id}} Instance:{{$labels.instance_id}} CPU Utilization:{{humanize $value}}%|{threshold}%','2019-08-23 17:38:23.000','2019-08-23 17:38:23.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mongodb_connections','Number of Incoming Connections','The number of incoming 
connections from clients to the database server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, state) (mongodb_connections{{filter}})','Connection','MongoDB','state',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MongoDB Number of Incoming Connections Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-13 02:26:09.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_buffer_io','Block read / write','mysql buffer I/O summary','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(mysql_global_status_innodb_buffer_pool_write_requests, "data_type", "write", "", "") or +label_replace(mysql_global_status_innodb_buffer_pool_read_requests, "data_type", "read", "", "") )','Block','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} POD:{{$labels.xm_pod_id}} Mysql Buffer IO:{{humanize $value}}|{threshold}.','2019-12-05 07:30:33.000','2020-02-13 01:14:23.895'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_innodb_buffer_pool_reads','Number of Reads Directly from Disk','The number of logical reads that InnoDB could not satisfy from the buffer pool, and had to read directly from disk','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_innodb_buffer_pool_reads[1m]))','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Reads Directly from Disk Counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('mysql_global_status_connections','Number of Connection Attempts','The number of connection attempts (successful or not) to the MySQL server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(mysql_global_status_connections[1m]))','Connection','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Number of Connection Attempts counts:{{humanize $value}}|{threshold}.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_status_locks','Number of Locks in MySQL','Number of Locks in MySQL','sum by (data_type, xm_clst_id, xm_namespace, xm_node_id, instance) ( +label_replace(rate(mysql_global_status_innodb_row_lock_current_waits[1m]), "data_type", "rowlocks", "", "") or +label_replace(rate(mysql_global_status_innodb_row_lock_waits[1m]), "data_type", "waits for rowlocks", "", "") or +label_replace(rate(mysql_global_status_table_locks_immediate[1m]), "data_type", "tablelock immediate", "", "") or +label_replace(rate(mysql_global_status_table_locks_waited[1m]), "data_type", "tablelock waited", "", "") )','Lock','MySQL','data_type',true,false,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Mysql Status Locks:{{humanize $value}}|{threshold}.','2019-12-05 08:39:30.000','2020-02-13 01:12:05.438'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_usage_bytes','Container Filesystem Used Bytes (GiB)','Number of bytes that are consumed by the container on this filesystem / 1073741824','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (container_fs_usage_bytes{xm_entity_type=''Container'',{filter}}) / 1073741824','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} 
CONT:{{$labels.xm_cont_name}} Filesystem Used:{{humanize $value}}GiB|{threshold}GiB.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('container_fs_writes','Container Filesystem Write Bytes (KiB)','Cumulative count of bytes written / 1024','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_fs_writes_bytes_total{xm_entity_type=''Container'',{filter}}[1m])) / 1024','Filesystem','Container',NULL,true,true,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.','2019-05-20 05:58:07.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('oracledb_sessions_value','Session Count','Gauge metric with count of sessions by status and type','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, status_type) +(label_join(oracledb_sessions_value, "status_type", "-", "status", "type"))','Session','OracleDB','status_type',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Oracle Session Count:{{humanize $value}}|{threshold}.','2020-01-28 13:03:00.000','2020-02-13 01:34:00.720'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pg_stat_database_temp_bytes','Bytes Written to Temporary Files (KiB)','Total amount of data written to temporary files by queries in this database','sum by (xm_clst_id, xm_namespace, xm_node_id, instance, datname) (rate(pg_stat_database_temp_bytes[1m])) / 1024','TemporaryFile','PostgreSQL','datname',true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} PostgreSQL Temporary File 
Write Size:{{humanize $value}}|{threshold}.','2019-08-27 15:49:21.000','2019-08-27 15:49:21.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_sys','System CPU Used','System CPU consumed by the Redis server','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_sys[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis System CPU Used:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('redis_used_cpu_user_children','User CPU Used Background','User CPU consumed by the background processes','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) (rate(redis_used_cpu_user_children[1m]))','CPU','Redis',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} Redis User CPU Used Background:{{humanize $value}}|{threshold}.','2020-01-28 14:33:00.000','2020-01-28 14:33:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_http_error_rate','Service HTTP Requests Error Rate','the number of HTTP error counts / the number of HTTP requests counts','sum by(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) == 0 or +sum by (xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) (rate(imxc_service_errors_count{xm_entity_type="Service",protocol="http",{filter}}[1m])) / sum by +(xm_clst_id,xm_service_name,xm_entity_type,xm_namespace) 
(rate(imxc_service_request_milliseconds_count{xm_entity_type="Service",protocol="http",{filter}}[1m]))','Request','Service',NULL,true,true,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2019-10-15 09:37:44.000','2020-02-17 12:12:12.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('mysql_global_status_cache_hit_ratio','Buffer Cache Hit Ratio (%)','(Number of Logical Read - Number of Reads Directly from Disk) / (Number of Logical Read) * 100','sum by (xm_clst_id, xm_namespace, xm_node_id, instance) ((increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) - increase(mysql_global_status_innodb_buffer_pool_reads[1m])) / increase(mysql_global_status_innodb_buffer_pool_read_requests[1m]) * 100)','Block','MySQL',NULL,true,true,'CLST:{{$labels.xm_clst_id}} NS:{{$labels.xm_namespace}} MySQL Buffer Cache Hit Ratio:{{humanize $value}}%|{threshold}%.','2019-12-04 16:45:00.000','2019-12-04 16:45:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('pod_fs_usage','Pod Filesystem Usage (%)','Pod File System Usage: 100 * (Used Bytes / Limit Bytes)','sum by (xm_clst_id,xm_pod_id,xm_entity_type,xm_namespace) ( +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} /((container_fs_limit_bytes{xm_entity_type=''Container'',{filter}} * 100) > 0) or +container_fs_usage_bytes{xm_entity_type=''Container'',{filter}} / 1000)','Filesystem','Pod',NULL,true,true,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.','2019-06-05 10:27:42.000','2019-06-29 03:28:14.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('node_pod_cpu_request','Node Pod CPU Request','Node Pod CPU Request','sum by (xm_clst_id, xm_node_id) (imxc_kubernetes_container_resource_request_cpu{{filter}})','CPU','Node',NULL,true,false,'NODE:{{$labels.xm_node_id}} Pod CPU Requests:{{humanize $value}}|{threshold}.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_pod_cpu_usage','Node Pod CPU Usage (%)','Node Pod CPU Usage','sum by (xm_clst_id,xm_node_id) (clamp_min((rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',{filter}}[1m] offset 10s)),0)) * 100','CPU','Node',NULL,true,true,'NODE:{{$labels.xm_node_id}} Pod CPU Usage:{{humanize $value}}%|{threshold}%.','2020-11-20 06:50:49.546','2020-11-20 06:50:49.546'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_usage_core','Container CPU Usage (Core)','Container CPU Usage (Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_usage_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_system_core','Container CPU System (Core)','Container CPU Usage (System)(Core)','sum by (xm_clst_id,xm_node_id,xm_pod_id,xm_cont_name,xm_entity_type,xm_namespace,xm_cont_id) (rate(container_cpu_system_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,in_use,anomaly_score,message) + VALUES ('container_cpu_user_core','Container CPU User (Core)','Container CPU Usage 
(User)(Core)','sum by (xm_clst_id,xm_pod_id,xm_cont_id,xm_cont_name,xm_entity_type,xm_namespace,xm_node_id) (rate(container_cpu_user_seconds_total{xm_entity_type=''Container'',xm_cont_name!=''POD'',{filter}}[1m]))','CPU','Container',true,false,'None'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_service','pod info in service','pod info(state, node) in service','sum by (xm_clst_id, xm_namespace, xm_service_name,xm_node_id,node_status,xm_pod_id,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2020-12-22 16:05:00.000','2020-12-22 16:05:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_service_state','Service State Count Sum','service state sum by xm_service_name','sum by (xm_service_name,pod_state) (imxc_kubernetes_endpoint_count{{filter}})','Pod','Service',NULL,false,false,'None','2021-01-06 17:30:00.000','2021-01-06 17:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_workload_state','Workload State Count Sum','workload state sum by owner_name','count by (owner_name, pod_state) (imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_pod_info_in_workload','Pod info by workload type','pod info(state, node) by workload type (do filter param)','count by (xm_clst_id, xm_namespace, owner_name, xm_node_id, node_status, xm_pod_id, pod_state) 
(imxc_kubernetes_container_resource_request_cpu{{filter}})','Pod','Workload',NULL,false,false,'None','2021-02-08 17:00:00.000','2021-02-08 17:00:00.000'); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('node_up_state','Node State metric','Node State metric for up, down check','imxc_kubernetes_node_ready{{filter}}','State','Node',NULL,true,false,'Cluster:{{$labels.xm_clst_id}} Node:{{$labels.xm_node_id}} Down {threshold}.','2020-02-02 14:30:00.000','2020-02-02 14:30:00.000'); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_by_workload', 'Container CPU User By workload (%)', 'Container CPU Usage(User)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU User (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_core_by_workload', 'Container CPU System By workload (Core)', 'Container CPU(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum 
(imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (Core) (System):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_core_by_workload', 'Container CPU Usage By workload (Core)', 'Container CPU Usage (Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_user_core_by_workload', 'Container CPU User By workload (Core)', 'Container CPU Usage (User)(Core)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_user_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, FALSE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} 
PD:{{$labels.xm_pod_id}} CPU User (Core):{{humanize $value}}|{threshold}.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_system_by_workload', 'Container CPU System By workload (%)', 'Container CPU Usage (System)', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_system_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) * 100', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU System (%):{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_cpu_usage_by_workload', 'Container CPU Usage By workload (%)', 'Container CPU Usage', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_cpu_usage_seconds_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0)', 'CPU', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} CPU Usage (%):{{humanize $value}}%|{threshold}%', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_reads_by_workload', 
'Container Filesystem Read Bytes By workload (KiB)', 'Cumulative count of bytes read / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_reads_bytes_total{xm_cont_name!="POD"} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Reads:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_limit_bytes_by_workload', 'Container Filesystem Limit Bytes By workload (GiB)', 'Number of bytes that can be consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_limit_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running", {filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Limit:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_bytes_by_workload', 'Container Filesystem Used Bytes By workload (GiB)', 'Number of bytes that are consumed by the container on this filesystem / 1073741824', 'sum by(xm_clst_id, 
xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_fs_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Used:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_writes_by_workload', 'Container Filesystem Write Bytes By workload (KiB)', 'Cumulative count of bytes written / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_fs_writes_bytes_total{xm_cont_name!="POD"}[1m]) + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Writes:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_fs_usage_by_workload', 'Container Filesystem Usage By workload (%)', 'Container File System Usage: 100 * (Used Bytes / Limit Bytes) (not contain persistent volume)', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) ((container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, 
xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)/ (((container_fs_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) * 100) > 0) or (container_fs_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1000)', 'Filesystem', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Filesystem Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_max_usage_bytes_by_workload', 'Container Memory Max Used By workload (GiB)', 'Maximum memory usage recorded in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_max_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Max Memory Usage:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 
(id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_bytes_by_workload', 'Container Memory Used By workload (GiB)', 'Current memory usage in GiB, this includes all memory regardless of when it was accessed', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_usage_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Used Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_usage_by_workload', 'Container Memory Usage By workload (%)', 'Container Memory usage compared to limit if limit is non-zero or 1GiB if limit is zero', 'sum by (xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / (((container_spec_memory_limit_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0)) > 0) * 100) or sum by (xm_clst_id, xm_namespace, owner_name, 
xm_pod_id, xm_cont_name, xm_entity_type) ((container_memory_usage_bytes{xm_entity_type="Container", xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024 *100))', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Memory Usage:{{humanize $value}}%|{threshold}%.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_swap_by_workload', 'Container Memory Swap By workload (GiB)', 'Container swap usage in bytes / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_swap{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Swap Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_working_set_bytes_by_workload', 'Container Memory Working Set By workload (GiB)', 'Current working set in GiB, this includes recently accessed memory, dirty memory, and kernel memory', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_working_set_bytes{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, 
xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1024 / 1024 / 1024', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Working Set Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_memory_cache_by_workload', 'Container Memory Cache By workload (GiB)', 'Number of bytes of page cache memory / 1073741824', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (container_memory_cache{xm_cont_name!="POD"} + on (xm_clst_id, xm_namespace, xm_pod_id, xm_cont_name) group_left(owner_name) (sum (imxc_kubernetes_container_resource_limit_cpu{container_state="Running", pod_state="Running" ,{filter}}) without (instance)) * 0) / 1073741824', 'Memory', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Cache Memory:{{humanize $value}}GiB|{threshold}GiB.', now(), now()); +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_receive_by_workload', 'Container Network Receive By workload (KiB)', 'Network device statistic receive_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name,xm_entity_type) (rate(container_network_receive_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by (xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 
'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Receive Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); + +INSERT INTO public.metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES('container_network_transmit_by_workload', 'Container Network Transmit By workload (KiB)', 'Network device statistic transmit_bytes / 1024', 'sum by(xm_clst_id, xm_namespace, owner_name, xm_pod_id, xm_cont_name, xm_entity_type) (rate(container_network_transmit_bytes_total{} [1m])+ on (xm_clst_id, xm_namespace, xm_pod_id) group_left(owner_name) sum by(xm_clst_id, xm_namespace, xm_pod_id, owner_name) (imxc_kubernetes_container_resource_limit_cpu{{filter}}) * 0) / 1024', 'Network', 'Workload', NULL, TRUE, TRUE, 'CLST:{{$labels.xm_clst_id}} DP:{{$labels.owner_name}} CT:{{$labels.xm_cont_name}} PD:{{$labels.xm_pod_id}} Network Transmit Usage:{{humanize $value}}KiB|{threshold}KiB.', now(), now()); +--Number of Pods not running +INSERT INTO public.metric_meta2 VALUES ('count_pod_not_running_by_workload','Number of Pods not running By Workload','Number of Pods not running (pod_state)','count by (xm_clst_id, xm_pod_id,xm_cont_id, xm_cont_name, entity_type, xm_namespace, pod_state) (imxc_kubernetes_container_resource_limit_cpu{pod_state!="Running", {filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} POD:{{$labels.xm_pod_id}} State:{{$labels.pod_state}}.',now(),now()); +--Number of Containers not running +INSERT INTO public.metric_meta2 VALUES ('count_container_not_running_by_workload','Number of Containers not running By Workload','Number of Containers not running (container_state)','count by (xm_clst_id, xm_pod_id, xm_cont_id, xm_cont_name, entity_type, xm_namespace, container_state) (imxc_kubernetes_container_resource_limit_cpu{container_state!="Running", 
{filter}})','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} State:{{$labels.container_state}}.',now(),now()); +-- Containers Restart count +INSERT INTO public.metric_meta2 VALUES ('cotainer_restart_count_by_workload','Number of Containers Restart','Number of Containers Restart (10m)','increase(imxc_kubernetes_container_restart_count{{filter}}[10m])','State','Workload',null,true,false,'CLST:{{$labels.xm_clst_id}} CONT:{{$labels.xm_cont_name}} RESTARTCOUNT FOR 10MINUTE:{{humanize $value}}.',now(),now()); + +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_per_sec','Service Transaction Count (per Second)','Service Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Transaction Count (per Second)','2021-11-15 16:11:19.606','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_elapsed_time_avg','Service Pod Transaction Elapsed Time (avg)','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))==0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_laytency{{filter}}[1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (increase(imxc_txn_total_count{{filter}}[1m]))','Request','Service',NULL,true,true,'Service Average Elapsed Time','2021-11-15 16:09:34.233','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES 
('imxc_jspd_txn_error_rate','Service Transaction Error Rate','Service Transaction Error Rate','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Error Request Rate:{{humanize $value}}%|{threshold}%.','2022-02-15 14:33:00.118000','2022-02-15 15:40:17.640000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_per_sec','Service Pod Transaction Count (per sec)','The number of transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m]))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-02-15 17:59:39.450000','2022-02-15 17:59:39.450000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_elapsed_time_avg','Service Average Elapsed Time','Service Average Elapsed Time','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_total_count{{filter}}[1m])))== 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) ((increase(imxc_txn_laytency{{filter}}[1m])))/ sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) 
((increase(imxc_txn_total_count{{filter}}[1m])))','Request','Service',null,true,true,'SVC:{{$labels.xm_service_name}} Transaction Requests Time Avg:{{humanize $value}}ms|{threshold}ms.','2021-11-15 16:09:34.233000','2021-11-15 16:12:21.335000'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_txn_error_count','Service Transaction Error Count','Service Transaction Error Count','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])))','Request','Service',NULL,true,true,'Service Transaction Error Count','2021-11-15 16:10:31.352','2021-11-15 16:12:21.335'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_txn_error_rate','Service Pod Transaction Error Rate','The number of transaction error rate for pod','sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count{{filter}}[1m])) == 0 or sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_error_count {{filter}} [1m])) / sum by(xm_clst_id, xm_namespace, xm_entity_type, xm_pod_id, xm_service_name) (rate(imxc_txn_total_count {{filter}} [1m])))','Request','Service',null,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Transaction Error rate:{{humanize $value}}|{threshold}.','2022-02-15 18:08:58.180000','2022-02-15 18:08:58.180000'); + +INSERT INTO 
metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_active_txn_per_sec','Service Active Transaction Count (per Second)','Service Active Transaction Count (per Second)','sum by(xm_clst_id, xm_namespace, xm_service_name) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:51:45.946','2022-03-11 15:51:45.946'); +INSERT INTO metric_meta2 (id,meta_name,description,expr,resource_type,entity_type,groupby_keys,in_use,anomaly_score,message,created_date,modified_date) VALUES ('imxc_jspd_pod_active_txn_per_sec','Service Pod Active Transaction Count (per sec)','The number of active transaction counts per second for pod','sum by(xm_clst_id, xm_namespace, xm_service_name, xm_pod_id) (rate(imxc_txn_active_count{{filter}}[1m]))','Request','Service',NULL,true,false,'SVC:{{$labels.xm_service_name}} Svc Pod Active Transaction count/Seconds:{{humanize $value}}|{threshold}.','2022-03-11 15:53:29.252','2022-03-11 15:53:29.252'); + + +INSERT INTO public.license_key (id, license_key, set_time, in_used, tenant_id) VALUES (nextval('hibernate_sequence'), 
'A46CB0A0870B60DD0EF554F092FB8490C647C4ACCF17177EB0028FEF1B677A1DC86C08219D3D357E55E87B653A9D2F044F9095576ED493CE5D1E180E8843A04BCFE94E500F85491D408CFC7397B82F00063415F4CF8756545B6ED1A38F07F91A7B6D9381B7FC433A5086CDD2D748527ECB42835677199F23F7C8E33A66E8138182DDD76BE4925FA4B1DFD96FD5578FE80C75E0E20D76877BF6FD570265D8E69CAC34795B982CF8D811669894886567E4F5F62E28990953401374B548787E35374BFF201D5C9AD062B326E72F9B1D7791A610DA1BDF1D4F829819BC537E06C8D54F95FB04F2DAC456698F605DE3BBD72E472FC79658C806B188988B053E1E4D96FFFFFF0312983D630FAD5E9160650653074248047030124045265319328119048121312221292096178141356403289033057286071001044254168244430392446457353385472238471183338511051434316333006127241420429465082200161165099271484261287306170426201314452131350327249112310323036187433166345114324280269098441154231174135226128298344425341164290424093450115453299282209144110060155055496368233391148510223372355438125122460232315097083390283180026090507303464176016343147301028053052418046214169100404193398101492126437150008449359062078276386196105011194373118107003376243188284337378334352432479501211364186021040035210237120336302073022394079272002081397132067383497202300181309396185361017436058208454167203412219275329234043427354024133409339470296204490485256467335056F5B2CABD122B376DAEA67944E1CCE6867DF9EB6504C78F817DF9EB6504C78F81BF1E615E6EC6242C9667BD675FC5FA39C6672FE2068E5D1431C6CD04429D07655865E293C1F77ED7A0D33F5556DA6CD3A8EC2774DB04F797CE4A29B0312F75E585D51D7B4DD227EA6BD5278CB9233040E7DD2B30A6D5119959D5B7EAC826D3DA0537EFB5A034A6A1C91A619F4E168F46A455B594C91F058E1E22C7EA2957EED7533D069C335C95B4FA2B53E71A800343EA7F16B05AFBA04635F1FBDE9C81709C27BA075C78FA26311ED3A4A5226EF47FC84C3024999406B47F2098B5983CC3CAF79F92332074B9872E429CBE8EF12D5092628E4D4A39CBDDFCAAB2E382229CF09A5B10243340C1A7A0C5CBC14C704FCE873571524A5B038F1781CD31A4D8E2C48E02E63A2746E668273BE9D63937B88D8C864CE439528EB13BDFAC3E52EE4B8CB75B4ED65A7C97B42E5DAEE3E41D2331B06FFFBA71BECD9B96AEEB969670FC3869CC59050FD6DFA3245719531410402
2250232266247291151DEFAULT_TENANT', now(), true, 'DEFAULT_TENANT'); +insert into public.license_key2 (id, license_key, set_time, cluster_id, license_used) values (nextval('hibernate_sequence'), 'D041F44269EAFF1AF7C37ACAA86B7D9CBED89547431E777B797220CF62FE5D6A27C66BEBEAB8F4C89EA5379009C90CDEBFFAE307B7AEB897DC4D8CEAB61654340BB746B0B46679A9FB4791C777BAEBA176308F6BEB1654CE43D4E80E6D0F80CEC00B1EC30E7DA4BB8D3159133EF98AEB50617107DB77BE94676E0D4AA04ADA3B11A66824DB89A60C52BC1AB92926F10189DBBA6210B31478F48CF87B5D754F1A7C6BED0D1637742179DBF7BE82B3B3357AEA82CFAAD9126E39C4E19BABCB1CBDDB816C86A8F7C476D963265720383B627800775B0C9116D67CE5CB7CFC71D0A8A36623965EBB18A5BE1816FB1FAAAEAC361D2ABBC7344EC0B6C61E0395115B13FFFFFF03DEF34E840F2ED2AC84AC44DF368362366124308470063002498494067338303241077065122260378200508377102354337080160182150254091118451110391059070094162363290186239455351194330333503046082379128006166220287276298120398066372099177432015458270176242025196335311342039022343475412085392206244005184417460227292375103433217376511140361223163316121467443014486278407389237024349111268136424371062035285300509195050441367478101310353464249250399393211468032382017479033204215420319027225173414447170427346074048078201158299332476339297492269181214328291096331271222221199421106169418137405411466364104047152090465446480302462385088114481261428257207129020358100073347153355274495263056109229159157348228275180360410147142130230179450079472482323145202198010119F9BFDDF3C203A7E537AB046811BB7CEA37AB046811BB7CEA37AB046811BB7CEAE012403885A8163C0E3E14D7AD6207B5E8CE91579501D84B09D6682339A4DB462F479FFE1B232AFB3D19E925768AF0AA3E62D9AB6F9CEADDB1CDCA351CAA90996631814A556C47270431A6A40891F756FDDCA7BDD05C62A2932F8E77979E0D43C9F12565B1F4BB4F0520B44CC76BAC23F65330AC5966D22B209F32126132F4848E500A013F4DC32306A9620394D40C94B8EBC2406B68EBE31DAB17EF2DF977731A5C41C11311DC36E1FB8BC2529D1AA20D5D46919472212D781B1D77378872CBD14C2A5B783C7ADF0D2680946C52E56E186A7E971E7EAB2CF09511361DD892B5D4A113E8A2C60E3F7FEFA4100753D
82B7064101002937733CE0285C73130635F0CBBDF6F1160C2917B2DF9B1C391A8E9D7D9F380BF31A77A84017D0DF26B35BED6B2D145A051EB4345DA90241CA997828B8393ACD5C7316594634356CCC3986EFDD7776AC62C65E500ED125097142489479219130046503035CloudMOA', now(), null, true); + +INSERT INTO public.license_policy +(policy_id, policy_desc, term_year, term_month, term_day, license_type, allowable_range, storage_capacity, cluster_count, node_count, pod_count, service_count, core_count, host_ids, user_division, created_date, modified_date) +VALUES('promotion_license', '프로모션 기간에 사용자들에게 발급되는 라이선스', 0, 0, 14, 'trial', '0', 'unlimited', '1', '10', 'unlimited', 'unlimited', 'unlimited', 'unlimited', '1', now(), null); + +INSERT INTO public.report_template(id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) VALUES(nextval('hibernate_sequence'), 'admin', '2020-04-28 09:29:49.466', 'admin', '2020-04-28 09:29:49.466', '0 0 1 ? * * *', true, +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network 
Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s Cluster resource usage is displayed.

1. CPU Usage

${metricItem1587977724113}

2. Memory Usage

${metricItem1588037028605}

3. Network

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod


1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}





', 'cloudmoa Cluster Daily Report'); +INSERT INTO public.report_template (id, created_by, created_date, modified_by, modified_date, cron_exp, "enable", metric_data, template_data, title) +VALUES(nextval('hibernate_sequence'), 'admin', '2020-01-20 01:17:50.182', 'admin', '2020-04-29 08:01:40.841', '0 0 9 ? * * *', false, +'[{"id":"metricItem1579497906163","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_cpu_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1579497916213","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_memory_usage","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Memory Usage (%)","displayType":"bar","unit":"%","data":""},{"id":"metricItem1579497928963","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_network_receive","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node Network Receive (KiB)","displayType":"pie","unit":"%","data":""},{"id":"metricItem1579497947243","requestInfo":{"clusterId":"cloudmoa","namespace":"","entityId":"exem-master,exem-node001,exem-node002","metricId":"node_load5","type":"node","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Node CPU Load 5m Average","displayType":"table","unit":"%","data":""}]', +'

1. editor usage

Let''s write the editor.

1.1 Text Decoration

Bold
Italic
Strike


1.2 Color and blockquote

What''s your color?

Today is the first day of the rest of your life

1.3 List

  • Apple
  • Banana

  1. postgre
  2. cassandra
  3. prometheus

[ TODO List ]
  • Create DB table
  • Change file name

1.4 Link, Table, Image




Daemonset NameAgeNamespaceLabelsImageCPUMemory
imxc-agent5
day
imxcimxc-agentregistry.openstacklocal:5000/imxc/imxc-agent:latest83.151.68
GiB
kube-flannel-ds-amd643
month
kube-systemflannelnodequay.io/coreos/flannel:v0.11.0-amd641.0790.88
MiB
kube-proxy10
month
kube-systemkube-proxyk8s.gcr.io/kube-proxy:v1.16.01.18117.66
MiB
node-exporter10
month
defaultnode-exporternode-exporterprom/node-exporter4.7697.54
MiB

exem.jpg

1.6 Metric Item

${metricItem1579497906163}
${metricItem1579497916213}
${metricItem1579497928963}
${metricItem1579497947243}



















', 'Editor usage example'); + +INSERT INTO public.report_static(id, created_by, created_date, modified_by, modified_date, cron_exp, metric_data, template_data, title, "type", report_template_id) VALUES(10582051, 'admin', '2020-04-29 08:27:52.545', 'admin', '2020-04-29 08:27:52.545', '0 0 1 ? * * *', +'[{"id":"metricItem1587977724113","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_cpu_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster CPU Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588037028605","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_memory_usage","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Memory Usage (%)","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059107546","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_receive","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Receive","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059110952","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_network_transmit","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Network Transmit","displayType":"line","unit":"%","data":""},{"id":"metricItem1588059623963","requestInfo":{"clusterId":"cloudmoa","namespace":"All","entityId":"","metricId":"cluster_pod_ready_count","type":"Cluster","selectTime":"hour","dateType":"relative","startTime":"1d0h","endTime":"0d0h"},"metricName":"Cluster Pod Ready Count","displayType":"line","unit":"%","data":""}]', +'

1. Cluster Resource

Today''s cluster resource usage flow is shown.

1. CPU Usage

Abnormally high CPU usage by particular programs can be an indication that there is something wrong with the computer system.

${metricItem1587977724113}

2. Memory Usage

The Memory Usage window displays the amount of memory available on your system, as well as the memory currently in use by all applications, including Windows itself.

${metricItem1588037028605}

3. Network

A network transmit/receive provides basic network utilization data in relation to the available network capacity.

Transmit

${metricItem1588059107546}

Receive

${metricItem1588059110952}

2. Pod

1. Allocated Pods Count Trend

Running Pod Count
${metricItem1588059623963}







', +'cloudmoa Cluster Daily Report', 'manual', (select id from report_template where title='cloudmoa Cluster Daily Report')); + +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency 
(ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', (select id from auth_resource2 where name='CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +-- INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service 
TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', (select id from auth_resource2 where name='Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', 
'[{"i":"widget0","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":1,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget1","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":"cloudmoa"},"namespace":{"mod":false,"value":null},"entity":{"mod":true,"type":["node"],"value":["exem-master","exem-node001","exem-node002"]}}},"visualization":{"type":"select"}},"x":0,"y":0},{"i":"widget1","widget":{"header":"default-header","body":"line-chart-view","title":"CPU Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":2},{"i":"widget2","widget":{"header":"default-header","body":"horizontal-bar-chart-view","title":"Memory Usage"},"w":18,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":false,"clusterId":"cloudmoa","metricId":"node_memory_usage","entityId":[],"type":"node"}},"visualization":{"showLegend":true}},"x":0,"y":13},{"i":"widget3","widget":{"header":"default-header","body":"line-chart-view","title":"Network Transmit (KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_transmit","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":2},{"i":"widget4","widget":{"header":"default-header","body":"line-chart-view","title":"Network Receive 
(KiB)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_network_receive","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":2},{"i":"widget5","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Pod Running Count"},"w":30,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_pod_running_count","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":24},{"i":"widget6","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Read Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_read_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":18,"y":13},{"i":"widget7","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Disk Write Latency (ms)"},"w":15,"h":11,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_disk_write_latency","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":33,"y":13},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Filesystem Usage (%)"},"w":18,"h":12,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":"cloudmoa","metricId":"node_filesystem_usage","entityId":[],"type":"node","namespace":"default"}},"visualization":{"showLegend":true}},"x":0,"y":24}]', 'CloudMOA - Nodes Resource', +(select id from auth_resource3 where 
name='dashboard|admin|CloudMOA - Nodes Resource'), 'admin', 'admin', NULL, true); +INSERT INTO public.dashboard2 (id, created_date, modified_date, layout, title, auth_resource_id, created_by, modified_by, description, "share") VALUES(nextval('hibernate_sequence'), '2020-04-28 09:23:14.286', '2020-04-28 09:23:44.213', '[{"i":"widget0","widget":{"header":"default-header","body":"service-tps-view","title":"Service TPS"},"w":24,"h":7,"minW":12,"minH":6,"maxW":48,"maxH":16,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":0,"y":2},{"i":"widget1","widget":{"header":"default-header","body":"event-view"},"w":48,"h":2,"minW":2,"minH":2,"maxW":48,"maxH":36,"component":{"params":{"targets":["widget0","widget2","widget3","widget4","widget5","widget6","widget7","widget8"],"action":"changeFilter","options":{"clusterId":{"mod":true,"value":null},"namespace":{"mod":true,"value":null},"entity":{"mod":true,"type":["service"],"value":[]}}},"visualization":{"type":"select"}},"viewStyle":{"backgroundColor":"#252525"},"x":0,"y":0},{"i":"widget2","widget":{"header":"default-header","body":"service-treeMap-view"},"w":24,"h":21,"minW":20,"minH":10,"maxW":48,"maxH":48,"component":{"api":{"uri":"metric.chart","params":{"clusterId":null,"namespace":null,"entityId":null,"type":"service","range":false}}},"x":24,"y":2},{"i":"widget3","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Request Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":9},{"i":"widget4","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Error 
Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_errors_count","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":0,"y":16},{"i":"widget5","widget":{"header":"default-header","body":"scatter-chart-view","bodyClass":["drag-ignore"],"title":"Xview","headerClass":["drag-handle"]},"w":24,"h":13,"minW":20,"minH":12,"maxW":68,"maxH":60,"component":{"api":{"params":{}}},"x":0,"y":23},{"i":"widget6","widget":{"header":"default-header","body":"event-list-view","title":"Event List"},"w":24,"h":13,"minW":24,"minH":12,"maxW":48,"maxH":36,"component":{"api":{"params":{"clusterId":null}}},"x":24,"y":23},{"i":"widget7","widget":{"header":"default-header","body":"line-chart-view","title":"Service Latency"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_time_avg","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":9},{"i":"widget8","widget":{"header":"default-header","body":"stack-bar-chart-view","title":"Service Total Transaction Count"},"w":12,"h":7,"minW":8,"minH":4,"maxW":48,"maxH":18,"component":{"api":{"uri":"metric.chart","params":{"unique":false,"range":true,"clusterId":null,"namespace":null,"metricId":"imxc_service_http_requests_per_sec_by_api","entityId":"","type":null}},"visualization":{"showLegend":true}},"x":12,"y":16}]', 'Service Detail', +(select id from auth_resource3 where name='dashboard|admin|Service Detail'), 'admin', 'admin', NULL, true); + +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('normal_score', '20', null, null, 'anomaly', '2020-07-07 18:15:55.000000', '2020-07-07 18:15:53.000000'); +INSERT INTO public.common_setting (code_id, 
code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('attention_score', '60', null, null, 'anomaly', '2020-07-07 09:18:04.968765', '2020-07-07 09:18:04.968765'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('warning_score', '90', null, null, 'anomaly', '2020-07-07 09:18:17.091678', '2020-07-07 09:18:17.091678'); +INSERT INTO public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) VALUES ('collection_weeks', '5', null, null, 'anomaly', '2020-07-13 03:52:44.445408', '2020-07-13 03:52:44.445408'); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_storage_period', 7, 'retention period setting value for topology information', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_storage_period', 3, 'retention period setting value for trace data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_storage_period', 7, 'retention period setting value for event data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('metric_storage_period', 7, 'retention period setting value for metric data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_storage_period', 90, 'retention period setting value for sparse log', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, 
code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_storage_period', 7, 'retention period setting value for anomaly score', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_storage_period', 7, 'retention period setting value for alert data', null, 'storage', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_storage_period', 7, 'retention period setting value for audit data', null, 'storage', now(), null); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('topology_idx', 'kubernetes_cluster_info:kubernetes_cluster_history:kubernetes_cronjob_info:kubernetes_info:kubernetes_job_info:kubernetes_network_connectivity:kubernetes_pod_info:kubernetes_pod_history', 'elastic search topology type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('trace_idx', 'spaninfo:sta_httpapi:sta_httpsummary:sta_podinfo:sta_relation:sta_tracetrend:sta_externalrelation:sta_traceinfo:jspd_ilm', 'elastic search trace type data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('event_idx', 'kubernetes_event_info', 'elastic search for event data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('sparse_idx', 'sparse_model:sparse_log', 'elastic search sparse data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('anomaly_idx', 'entity_score:metric_score:timeline_score', 'elastic search amomaly data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('alert_idx', 'alert_event_history', 'elastic search alert data index', null, 'storageidx', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('audit_idx', 'kubernetes_audit_log', 'elastic search audit type data index', null, 'storageidx', now(), null); + +-- insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) values ('ratelimiting', 2.0, '{"type" : "int", "operator" : "range", "minVal" : "1", "maxVal" : "3000", "desc" : "The time-based sampling method allows input as an integer (e.g. 1 monitors only 1 trace per second)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('probabilistic', 0.1, '{"type" : "float", "operator" : "range", "minVal" : "0", "maxVal" : "1.0", "desc" : "Probability-based sampling method allows input between 0 and 1 (e.g. 
0.1 monitors only 10% of trace information)" }', null, 'tracesampling', '2020-07-30 13:54:52', null); + +INSERT INTO common_setting values('alert_expression','==,<=,<,>=,>', 'alert expression for user custom', null,'alert', now(), now()); + +INSERT INTO common_setting values('job_duration_range','86400', 'job duration range for average', null,'job', now(), now()); + +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Topology Agent', 'topology-agent', 'topology agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Agent', 'metric-agent', 'metric agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Trace Agent', 'cloudmoa-trace-agent', 'trace agent deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Datagate', 'datagate', 'datagate deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Jspd Collector', 'jspd-lite-collector', 'jspd collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Collector', 'metric-collector', 'metric collector deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Cloudmoa Collector', 'cmoa-collector', 'cloudmoa collector deployment name', null, 'modules', now(), null); +insert into 
public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Authentication Server', 'auth-server', 'authentication server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Notification Server', 'noti-server', 'notification server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Eureka Server', 'eureka', 'eureka server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Zuul Server', 'zuul-deployment', 'zuul server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Api Server', 'imxc-api', 'api server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Ui Server', 'imxc-ui', 'ui server deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Master', 'metric-analyzer-master', 'metric analyzer master deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Metric Analyzer Worker', 'metric-analyzer-worker', 'metric analyzer worker deployment name', null, 'modules', now(), null); +insert into public.common_setting (code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +values ('Kafka Stream 
Txntrend', 'kafka-stream-txntrend-deployment', 'kafka stream txntrend deployment name', null, 'modules', now(), null); + +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('error_msg', 'false', 'Error Message default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('alert_sound', 'false', 'Alert Sound default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('session_persistence', 'true', 'Session Persistence default value', '', 'user_setting', now(), null); +INSERT INTO public.common_setting +(code_id, code_value, code_desc, code_auth, code_group, created_date, modified_date) +VALUES('gpu_acc_topology', 'true', 'GPU Accelerated Topology default value', '', 'user_setting', now(), null); + +insert into public.log_management (cluster_id, node_id, log_rotate_dir, log_rotate_count, log_rotate_size, log_rotate_management, back_up_dir, back_up_period, back_up_dir_size, back_up_management, created_date, modified_date) values ('cloudmoa', '', '/var/lib/docker', 3, 100, true, '/home/moa/log', 5, 1000, true, '2020-07-30 13:54:52', null); + +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (5, 'metrics-server', 'agent', 'Metrcis-Server는 Kubernetes의 kubelet에 있는 cAdvisor로부터 Container Metric 데이터를 수집하여 Prometheus에 전달하는 역할을 합니다.', null, '--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:cloudmoa-aggregated-metrics-reader + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: 
["pods"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloudmoa-metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloudmoa-metrics-server +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloudmoa-metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloudmoa-metrics-server +subjects: + - kind: ServiceAccount + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" +spec: + selector: + k8s-app: cloudmoa-metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloudmoa-metrics-server + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metrics-server + namespace: kube-system + labels: + k8s-app: cloudmoa-metrics-server +spec: + selector: + matchLabels: + k8s-app: cloudmoa-metrics-server + template: + metadata: + name: cloudmoa-metrics-server + labels: + k8s-app: cloudmoa-metrics-server + spec: + 
serviceAccountName: cloudmoa-metrics-server + volumes: + # mount in tmp so we can safely use from-scratch images and/or read-only containers + - name: tmp-dir + emptyDir: {} + containers: + - name: cloudmoa-metrics-server + image: $DOCKER_REGISTRY_URL/metrics-server-amd64 + command: + - /metrics-server + - --logtostderr + - --v=4 + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP + volumeMounts: + - name: tmp-dir + mountPath: /tmp1', true, '2021-03-11 13:41:48.000000', '2021-03-11 13:41:56.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (7, 'jaeger', 'application', 'CloudMOA에서는 고객사에서 운영 중인 application의 TPS, 서비스 연관관계 등의 데이터를 얻기 위해서 Jaeger를 사용하며, Jaeger 사용을 위해 Jaeger-client, jaeger-agent, jaeger-collector의 설치가 필요합니다. +', null, '--- +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-trace-agent + spec: + selector: + matchLabels: + app: cloudmoa-trace-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-trace-agent + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - image: $DOCKER_REGISTRY_URL/trace-agent:$IMAGE_TAG + name: cloudmoa-trace-agent + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT +- apiVersion: v1 + kind: Service + metadata: + name: cloudmoa-trace-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: 
cloudmoa-trace-agent + spec: + ports: + - name: agent-zipkin-thrift + port: 5775 + protocol: UDP + targetPort: 5775 + - name: agent-compact + port: 6831 + protocol: UDP + targetPort: 6831 + - name: agent-binary + port: 6832 + protocol: UDP + targetPort: 6832 + - name: agent-configs + port: 5778 + protocol: TCP + targetPort: 5778 + selector: + app: cloudmoa-trace-agent + type: ClusterIP', true, '2021-03-11 17:48:34.000000', '2021-03-11 17:48:39.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (4, 'node-exporter', 'agent', 'Node에 관련된 Metric 시계열 데이터를 수집하여 고객사 클러스터에 설치된 Prometheus에 전달하는 역할을 합니다.', null, '--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: ''true'' + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + clusterIP: None + ports: + - name: scrape + port: 9110 + protocol: TCP + selector: + app: cloudmoa-node-exporter + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-node-exporter + namespace: $CLOUDMOA_NAMESPACE +spec: + selector: + matchLabels: + app: cloudmoa-node-exporter + template: + metadata: + labels: + app: cloudmoa-node-exporter + name: cloudmoa-node-exporter + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - image: $DOCKER_REGISTRY_URL/node-exporter + name: cloudmoa-node-exporter + ports: + - containerPort: 9110 + hostPort: 9110 + name: scrape + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|run|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.tcpstat + - --web.listen-address=:9110 + # --log.level=debug + env: + - name: GOMAXPROCS + value: "1" + resources: + limits: + cpu: 250m + memory: 180Mi + requests: 
+ cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/proc + name: proc + readOnly: false + - mountPath: /host/sys + name: sys + readOnly: false + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + hostNetwork: true + hostPID: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - hostPath: + path: /proc + name: proc + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root +', true, '2021-03-11 13:41:02.000000', '2021-03-11 13:41:06.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (2, 'agent', 'agent', '관제 대상 클러스터의 Topology 데이터를 수집하여 Kafka를 통해 수집 클러스터에 전달하는 역할을 하며, 그 밖에 API 서버와의 TCP 연결을 통해 관리 기능, Log Viewer 기능 등을 수행합니다.', null, '--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloudmoa-cluster-role +rules: + - nonResourceURLs: + - "*" + verbs: + - get + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - nodes/stats + - endpoints + - namespaces + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - deployments/scale + - replicasets + - replicasets/scale + - statefulsets + - statefulsets/scale + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - update + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - update + - apiGroups: + - storage.j8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - 
extensions + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - imxc-ps + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client-kubelet + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kubelet-serving + resources: + - signers + verbs: + - approve + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - ''*'' + - apiGroups: + - ''*'' + resources: + - ''*'' + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloudmoa-restricted-rb + namespace: $CLOUDMOA_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: $CLOUDMOA_NAMESPACE +roleRef: + kind: ClusterRole + name: cloudmoa-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: cloudmoa-psp + namespace: $CLOUDMOA_NAMESPACE +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + hostPorts: + - max: 65535 + min: 0 + hostNetwork: true + hostPID: true + volumes: + - configMap + - secret + - emptyDir + - hostPath + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloudmoa-topology-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-topology-agent +spec: + selector: + matchLabels: + app: cloudmoa-topology-agent + template: + metadata: + labels: + app: cloudmoa-topology-agent + spec: + hostNetwork: true + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + 
containers: + - name: cloudmoa-topology-agent + image: $DOCKER_REGISTRY_URL/topology-agent:$IMAGE_TAG + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 600Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + - mountPath: /log + name: log-volume + env: + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "INFO" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / + - name: log-volume + hostPath: + path: /home', true, '2021-03-11 13:37:48.000000', '2021-03-11 13:37:51.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (6, 'prometheus', 'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.16', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + 
metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: ''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: 
; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: LOG_MAXAGE + value: "1" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', false, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); +insert into public.agent_install_file_info (id, name, type, description, version, yaml, use_yn, created_date, modified_date) values (3, 'prometheus', 
'agent', 'Prometheus는 다양한 Exporter들과 연결될 수 있으며, 기본적으로 Node Exporter와 cAdvisor를 통해 수집한 Metric 데이터를 Kafka를 통해 수집 클러스터에 전달하는 역할을 합니다.', '1.15', '--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloudmoa-metric-agent-config + namespace: $CLOUDMOA_NAMESPACE +data: + scaling.rules: | + groups: + - name: scaleup + rules : + - alert : ScaleUpRule + expr: job:webapp_config_open_sessions_current_count:sum > 15 + annotations: + summary: "Scale up when current sessions is greater than 15" + description: "Firing when total sessions active greater than 15" + metric-agent.yml: | + global: + scrape_interval: 15s + + scrape_configs: + - job_name: ''kubernetes-kubelet'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - target_label: xm_entity_type + replacement: ''Node'' + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + - job_name: ''kubernetes-node-exporter'' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: kubernetes_role + - source_labels: [__address__] + regex: ''(.*):10250'' + replacement: 
''${1}:9110'' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: ''kubernetes-(.*)'' + replacement: ''${1}'' + target_label: name + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Node'' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_memory_SReclaimable_bytes|node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + + - job_name: ''kubernetes-cadvisor'' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + metrics_path: /metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: 
__meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: ''$CLOUDMOA_CLUSTER_ID'' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: ''Container'' + + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloudmoa-metric-agent + namespace: $CLOUDMOA_NAMESPACE + labels: + app: cloudmoa-metric-agent +spec: + selector: + matchLabels: + app: cloudmoa-metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: cloudmoa-metric-agent + spec: + containers: + - name: cloudmoa-metric-agent + image: $DOCKER_REGISTRY_URL/metric-agent:$IMAGE_TAG + args: + - --config.file=/etc/metric-agent/metric-agent.yml + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 300m + memory: 1000Mi + volumeMounts: + - mountPath: /etc/metric-agent/ + name: config-volume + env: + - name: LOG_LEVEL + value: "INFO" + - name: CLUSTER_ID + value: $CLOUDMOA_CLUSTER_ID + - name: DATAGATE + value: $COLLTION_SERVER_DATAGATE_IP:$COLLTION_SERVER_DATAGATE_PORT + - name: STORAGE_TYPE + value: datagate + 
restartPolicy: Always + volumes: + - name: config-volume + configMap: + name: cloudmoa-metric-agent-config +', true, '2021-03-11 13:39:07.000000', '2021-03-11 13:39:09.000000'); + +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('config', now(), null, 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', 'global:${GLOBAL}\nroute:${ROUTE}\nreceivers:${RECEIVERS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('global', now(), null, '\n resolve_timeout: ${RESOLVE_TIMEOUT}', '\n resolve_timeout: 5m', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('receivers', now(), null, '\n- name: ''${NAME}''\n webhook_configs:${WEBHOOK_CONFIGS}', '\n- name: ''cdms''\n webhook_configs:${WEBHOOK_CONFIGS}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('route', now(), null, '\n receiver: ''${RECEIVER}''\n group_by: [${GROUP_BY}]\n group_wait: ${GROUP_WAIT}\n group_interval: ${GROUP_INTERVAL}\n repeat_interval: ${REPEAT_INTERVAL}\n routes:${ROUTES}', '\n receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 10m\n routes:${ROUTES}', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('webhook_configs', now(), null, '\n - url: ''${WEBHOOK_URL}''\n send_resolved: ${SEND_RESOLVED}', '\n - url: ''${WEBHOOK_URL}''\n send_resolved: false', true); +insert into public.alert_config_info (config_id, created_date, modified_date, config_data, config_default, in_use) values ('routes', now(), null, '\n - receiver: ''${ROUTES_RECEIVER}''\n group_by: [${ROUTES_GROUP_BY}]\n group_wait: ${ROUTES_GROUP_WAIT}\n group_interval: 
${ROUTES_GROUP_INTERVAL}\n repeat_interval: ${ROUTES_REPEAT_INTERVAL}\n match_re:\n level: ${LEVEL}\n continue: ${CONTINUE}', '\n - receiver: ''cdms''\n group_by: [xm_clst_id, level]\n group_wait: 5s\n group_interval: 5s\n repeat_interval: 1m\n match_re:\n level: Critical\n continue: true', true); + + +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('config', now(), null, 'groups:${GROUPS}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('groups', now(), null, '\n- name: "${NAME}"\n rules:${RULES}', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('isHost', now(), null, '\n instance: "{{ $labels.instance }}"\n is_host: "true"', true); +insert into public.alert_rule_config_info (config_id, created_date, modified_date, config_data, in_use) values ('rules', now(), null, '\n - alert: "${ALERT}"\n expr: "${EXPR}"\n labels:\n level: "${LEVEL}"\n for: "${FOR}"\n annotations:\n xm_service_name: "{{ $labels.xm_service_name }}"\n level: "${LEVEL}"\n meta_id: "${META_ID}"\n xm_node_id: "{{ $labels.xm_node_id }}"\n threshold: ${THRESHOLD}\n xm_container_id: "{{ $labels.xm_cont_name }}"\n message: "${MESSAGE}"\n rule_id: ${RULE_ID}\n xm_pod_id: "{{ $labels.xm_pod_id }}"\n xm_clst_id: "{{ $labels.xm_clst_id }}"\n xm_namespace: "{{ $labels.xm_namespace }}"\n value: "{{ $value }}"\n xm_entity_type: "{{ $labels.xm_entity_type }}"\n alert_entity_type: "${ALERT_ENTITY_TYPE}"', true); + + +INSERT INTO jspd_prop values('TRX_NAME_TYPE','0', 'Set the transaction name generation method (0:default, 1:parameter, 2:param_nouri, 3:attribute)', 'integer','select','{"default":"0", "parameter":"1", "param_nouri":"2", "attribute":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('TRX_NAME_KEY','', 'Set the transaction name generation method by TRX_NAME_TYPE (parameter(1), 
param_nouri(2),attribute(3))','string','input','',true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_TXN','*:3000', 'Option to check TXNNAME with startsWith logic and collect calltree based on elapsetime. blank or set to *:0 when collecting all.', 'string','input','', true, now(), now()); +INSERT INTO jspd_prop values('CURR_TRACE_LEVEL','100', 'call tree detection level', 'integer','range','{"gte":"0", "lte":"100"}',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_JDBC','true', 'include call tree data', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_SERVICE','gif,js,css,xml', 'exclude service name', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('INCLUDE_EXCEPTION','', 'Exception that you do not want to be treated as an exception transaction is set.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_EXCEPTION','', 'Set the exception to be treated as an exception transaction.(type.Exception)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RESP_HEADER_TID','false', 'include X-Xm-Tid text for gearing imxwsmj', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_RUNTIME_REDEFINE_HTTP_REMOTE','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI','false', 'rt.jar (socket, file, throwable) function use yn option', 'boolean','input','',true, now(), now()); + +INSERT INTO jspd_prop values('RT_RMI_TYPE','3', 'remote key value(1: pkey, 2: ckey, 3: pckey)', 'integer','select','{"pkey":"1", "ckey":"2", "pckey":"3"}',true, now(), now()); +INSERT INTO jspd_prop values('RT_RMI_ELAPSE_TIME','0', 'Collect transactions that are greater than or equal to the 
option value', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_FILE','0x10', 'Display file input/output in call tree', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop values('RT_SOCKET','0x10', 'Display socket input/output in call tree', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('MTD_LIMIT','100000', 'Limit the number of calltree', 'integer','range','{"gte":"0"}',true, now(), now()); + +INSERT INTO jspd_prop values('LIMIT_SQL','20', 'Collection limits based on SQL sentence length', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_COUNT_LIMIT','3000', 'Transactions per second', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_ELLIPSIS','false', 'Collect length of sql string by half of SQL_TEXT_BUFFER_SIZE', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_SQL_LIMIT_COUNT','2000', 'SQL collection limit', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_CPU_TIME','false', 'cpu time metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TXN_MEMORY','false', 'memory alloc size metric used in transactions option', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('ENABLE_WEB_ID_WHEN_NO_USERAGENT','false', 'Do not create an web ID unless requested by the browser', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('USE_SQL_SEQ','false', 'Add sequence number to sql and packet', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_FETCH_METHOD','false', 'Display the fetch function of ResultSet in the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('EXCLUDE_THREAD','', 'Ability to block monitoring of a specific thread name, value = String[] (prefix1,prefix2)', 'string','input','',true, now(), now()); +INSERT INTO jspd_prop 
values('USE_METHOD_SEQ','false', 'Display the calltree in the form of a time series without summary', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_MEMORY','false', 'Collects allocation memory for each method of calltree. (unit k)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('TRACE_METHOD_CPUTIME','false', 'Collects cputime for each method of calltree. (unit ms)', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('DISABLE_ROOT_METHOD','false', 'Express the service root method at the top of the call tree', 'boolean','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_BUFFER_SIZE','2500', 'size of the internal buffer that stores the call tree method data.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_STACK_BUFFER_SIZE','100', 'A separate option to additionally collect methods that did not generate an error among methods that were not collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('MTD_EXCEPTION_BUFFER_SIZE','100', 'A separate option to additionally collect methods that have an error among methods that could not be collected because the MTD_BUFFER_SIZE option value was exceeded.', 'integer','input','',true, now(), now()); +INSERT INTO jspd_prop values('DEBUG','0x000000000', 'Option to specify log level (Debugging)', 'string','input','',true, now(), now()); + +INSERT INTO jspd_prop values('EXCEPTION_LIMIT', '-1', 'Exception content length limit', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_PERIOD', '1000', 'Txninfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_PERIOD', '1000', 'Txnmethod transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_PERIOD', '1000', 'Txnspl transmission 
cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_PERIOD', '1000', 'E2einfo transmission cycle (ms)', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('TXN_SEND_LIMIT', '15000', 'Txninfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('MTD_SEND_LIMIT', '15000', 'Txnmethod maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('SQL_SEND_LIMIT', '15000', 'Txnsql maximum number of transfers', 'integer', 'input', '', true, now(), now()); +INSERT INTO jspd_prop values('ETOE_SEND_LIMIT', '15000', 'E2einfo maximum number of transfers', 'integer', 'input', '', true, now(), now()); diff --git a/roles/cmoa_install/files/04-keycloak/Chart.yaml b/roles/cmoa_install/files/04-keycloak/Chart.yaml new file mode 100644 index 0000000..a5d4032 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 4.0.0 +description: Modified Authentication Module By EXEM CloudMOA +home: https://www.keycloak.org/ +icon: https://www.keycloak.org/resources/images/keycloak_logo_480x108.png +keywords: +- sso +- idm +- openid connect +- saml +- kerberos +- ldap +maintainers: +- email: unguiculus@gmail.com + name: unguiculus +- email: thomas.darimont+github@gmail.com + name: thomasdarimont +name: keycloak +sources: +- https://github.com/codecentric/helm-charts +- https://github.com/jboss-dockerfiles/keycloak +- https://github.com/bitnami/charts/tree/master/bitnami/postgresql +version: 11.0.1 diff --git a/roles/cmoa_install/files/04-keycloak/OWNERS b/roles/cmoa_install/files/04-keycloak/OWNERS new file mode 100644 index 0000000..8c2ff0d --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/OWNERS @@ -0,0 +1,6 @@ +approvers: + - unguiculus + - thomasdarimont +reviewers: + - unguiculus + - thomasdarimont diff --git a/roles/cmoa_install/files/04-keycloak/README.md 
b/roles/cmoa_install/files/04-keycloak/README.md new file mode 100644 index 0000000..5f8da10 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/README.md @@ -0,0 +1,765 @@ +# Keycloak + +[Keycloak](http://www.keycloak.org/) is an open source identity and access management for modern applications and services. + +## TL;DR; + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Introduction + +This chart bootstraps a [Keycloak](http://www.keycloak.org/) StatefulSet on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +It provisions a fully featured Keycloak installation. +For more information on Keycloak and its capabilities, see its [documentation](http://www.keycloak.org/documentation.html). + +## Prerequisites Details + +The chart has an optional dependency on the [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart. +By default, the PostgreSQL chart requires PV support on underlying infrastructure (may be disabled). + +## Installing the Chart + +To install the chart with the release name `keycloak`: + +```console +$ helm install keycloak codecentric/keycloak +``` + +## Uninstalling the Chart + +To uninstall the `keycloak` deployment: + +```console +$ helm uninstall keycloak +``` + +## Configuration + +The following table lists the configurable parameters of the Keycloak chart and their default values. 
+ +| Parameter | Description | Default | +|---|---|---| +| `fullnameOverride` | Optionally override the fully qualified name | `""` | +| `nameOverride` | Optionally override the name | `""` | +| `replicas` | The number of replicas to create | `1` | +| `image.repository` | The Keycloak image repository | `docker.io/jboss/keycloak` | +| `image.tag` | Overrides the Keycloak image tag whose default is the chart version | `""` | +| `image.pullPolicy` | The Keycloak image pull policy | `IfNotPresent` | +| `imagePullSecrets` | Image pull secrets for the Pod | `[]` | +| `hostAliases` | Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files | `[]` | +| `enableServiceLinks` | Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links | `true` | +| `podManagementPolicy` | Pod management policy. One of `Parallel` or `OrderedReady` | `Parallel` | +| `restartPolicy` | Pod restart policy. One of `Always`, `OnFailure`, or `Never` | `Always` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | `""` | +| `serviceAccount.annotations` | Additional annotations for the ServiceAccount | `{}` | +| `serviceAccount.labels` | Additional labels for the ServiceAccount | `{}` | +| `serviceAccount.imagePullSecrets` | Image pull secrets that are attached to the ServiceAccount | `[]` | +| `rbac.create` | Specifies whether RBAC resources are to be created | `false` +| `rbac.rules` | Custom RBAC rules, e. g. for KUBE_PING | `[]` +| `podSecurityContext` | SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. 
This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) | `{"fsGroup":1000}` | +| `securityContext` | SecurityContext for the Keycloak container | `{"runAsNonRoot":true,"runAsUser":1000}` | +| `extraInitContainers` | Additional init containers, e. g. for providing custom themes | `[]` | +| `extraContainers` | Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy | `[]` | +| `lifecycleHooks` | Lifecycle hooks for the Keycloak container | `{}` | +| `terminationGracePeriodSeconds` | Termination grace period in seconds for Keycloak shutdown. Clusters with a large cache might need to extend this to give Infinispan more time to rebalance | `60` | +| `clusterDomain` | The internal Kubernetes cluster domain | `cluster.local` | +| `command` | Overrides the default entrypoint of the Keycloak container | `[]` | +| `args` | Overrides the default args for the Keycloak container | `[]` | +| `extraEnv` | Additional environment variables for Keycloak | `""` | +| `extraEnvFrom` | Additional environment variables for Keycloak mapped from a Secret or ConfigMap | `""` | +| `priorityClassName` | Pod priority class name | `""` | +| `affinity` | Pod affinity | Hard node and soft zone anti-affinity | +| `nodeSelector` | Node labels for Pod assignment | `{}` | +| `tolerations` | Node taints to tolerate | `[]` | +| `podLabels` | Additional Pod labels | `{}` | +| `podAnnotations` | Additional Pod annotations | `{}` | +| `livenessProbe` | Liveness probe configuration | `{"httpGet":{"path":"/health/live","port":"http"},"initialDelaySeconds":300,"timeoutSeconds":5}` | +| `readinessProbe` | Readiness probe configuration | `{"httpGet":{"path":"/auth/realms/master","port":"http"},"initialDelaySeconds":30,"timeoutSeconds":1}` | +| `resources` | Pod resource requests and limits | `{}` | +| `startupScripts` | Startup scripts to run before Keycloak starts 
up | `{"keycloak.cli":"{{- .Files.Get "scripts/keycloak.cli" \| nindent 2 }}"}` | +| `extraVolumes` | Add additional volumes, e. g. for custom themes | `""` | +| `extraVolumeMounts` | Add additional volumes mounts, e. g. for custom themes | `""` | +| `extraPorts` | Add additional ports, e. g. for admin console or exposing JGroups ports | `[]` | +| `podDisruptionBudget` | Pod disruption budget | `{}` | +| `statefulsetAnnotations` | Annotations for the StatefulSet | `{}` | +| `statefulsetLabels` | Additional labels for the StatefulSet | `{}` | +| `secrets` | Configuration for secrets that should be created | `{}` | +| `service.annotations` | Annotations for headless and HTTP Services | `{}` | +| `service.labels` | Additional labels for headless and HTTP Services | `{}` | +| `service.type` | The Service type | `ClusterIP` | +| `service.loadBalancerIP` | Optional IP for the load balancer. Used for services of type LoadBalancer only | `""` | +| `loadBalancerSourceRanges` | Optional List of allowed source ranges (CIDRs). Used for service of type LoadBalancer only | `[]` | +| `service.httpPort` | The http Service port | `80` | +| `service.httpNodePort` | The HTTP Service node port if type is NodePort | `""` | +| `service.httpsPort` | The HTTPS Service port | `8443` | +| `service.httpsNodePort` | The HTTPS Service node port if type is NodePort | `""` | +| `service.httpManagementPort` | The WildFly management Service port | `8443` | +| `service.httpManagementNodePort` | The WildFly management node port if type is NodePort | `""` | +| `service.extraPorts` | Additional Service ports, e. g. for custom admin console | `[]` | +| `service.sessionAffinity` | sessionAffinity for Service, e. g. 
"ClientIP" | `""` | +| `service.sessionAffinityConfig` | sessionAffinityConfig for Service | `{}` | +| `ingress.enabled` | If `true`, an Ingress is created | `false` | +| `ingress.rules` | List of Ingress Ingress rule | see below | +| `ingress.rules[0].host` | Host for the Ingress rule | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.rules[0].paths` | Paths for the Ingress rule | `[/]` | +| `ingress.servicePort` | The Service port targeted by the Ingress | `http` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Additional Ingress labels | `{}` | +| `ingress.tls` | TLS configuration | see below | +| `ingress.tls[0].hosts` | List of TLS hosts | `[keycloak.example.com]` | +| `ingress.tls[0].secretName` | Name of the TLS secret | `""` | +| `ingress.console.enabled` | If `true`, an Ingress for the console is created | `false` | +| `ingress.console.rules` | List of Ingress Ingress rule for the console | see below | +| `ingress.console.rules[0].host` | Host for the Ingress rule for the console | `{{ .Release.Name }}.keycloak.example.com` | +| `ingress.console.rules[0].paths` | Paths for the Ingress rule for the console | `[/auth/admin]` | +| `ingress.console.annotations` | Ingress annotations for the console | `{}` | +| `networkPolicy.enabled` | If true, the ingress network policy is deployed | `false` +| `networkPolicy.extraFrom` | Allows to define allowed external traffic (see Kubernetes doc for network policy `from` format) | `[]` +| `route.enabled` | If `true`, an OpenShift Route is created | `false` | +| `route.path` | Path for the Route | `/` | +| `route.annotations` | Route annotations | `{}` | +| `route.labels` | Additional Route labels | `{}` | +| `route.host` | Host name for the Route | `""` | +| `route.tls.enabled` | If `true`, TLS is enabled for the Route | `true` | +| `route.tls.insecureEdgeTerminationPolicy` | Insecure edge termination policy of the Route. 
Can be `None`, `Redirect`, or `Allow` | `Redirect` | +| `route.tls.termination` | TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` | `edge` | +| `pgchecker.image.repository` | Docker image used to check Postgresql readiness at startup | `docker.io/busybox` | +| `pgchecker.image.tag` | Image tag for the pgchecker image | `1.32` | +| `pgchecker.image.pullPolicy` | Image pull policy for the pgchecker image | `IfNotPresent` | +| `pgchecker.securityContext` | SecurityContext for the pgchecker container | `{"allowPrivilegeEscalation":false,"runAsGroup":1000,"runAsNonRoot":true,"runAsUser":1000}` | +| `pgchecker.resources` | Resource requests and limits for the pgchecker container | `{"limits":{"cpu":"10m","memory":"16Mi"},"requests":{"cpu":"10m","memory":"16Mi"}}` | +| `postgresql.enabled` | If `true`, the Postgresql dependency is enabled | `true` | +| `postgresql.postgresqlUsername` | PostgreSQL User to create | `keycloak` | +| `postgresql.postgresqlPassword` | PostgreSQL Password for the new user | `keycloak` | +| `postgresql.postgresqlDatabase` | PostgreSQL Database to create | `keycloak` | +| `serviceMonitor.enabled` | If `true`, a ServiceMonitor resource for the prometheus-operator is created | `false` | +| `serviceMonitor.namespace` | Optionally sets a target namespace in which to deploy the ServiceMonitor resource | `""` | +| `serviceMonitor.namespaceSelector` | Optionally sets a namespace selector for the ServiceMonitor | `{}` | +| `serviceMonitor.annotations` | Annotations for the ServiceMonitor | `{}` | +| `serviceMonitor.labels` | Additional labels for the ServiceMonitor | `{}` | +| `serviceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `serviceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `serviceMonitor.path` | The path at which metrics are served | `/metrics` | +| `serviceMonitor.port` | The Service port at which metrics are served | `http` | +| `extraServiceMonitor.enabled` | If `true`, 
an additional ServiceMonitor resource for the prometheus-operator is created. Could be used for additional metrics via [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) | `false` | +| `extraServiceMonitor.namespace` | Optionally sets a target namespace in which to deploy the additional ServiceMonitor resource | `""` | +| `extraServiceMonitor.namespaceSelector` | Optionally sets a namespace selector for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.annotations` | Annotations for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.labels` | Additional labels for the additional ServiceMonitor | `{}` | +| `extraServiceMonitor.interval` | Interval at which Prometheus scrapes metrics | `10s` | +| `extraServiceMonitor.scrapeTimeout` | Timeout for scraping | `10s` | +| `extraServiceMonitor.path` | The path at which metrics are served | `/metrics` | +| `extraServiceMonitor.port` | The Service port at which metrics are served | `http` | +| `prometheusRule.enabled` | If `true`, a PrometheusRule resource for the prometheus-operator is created | `false` | +| `prometheusRule.annotations` | Annotations for the PrometheusRule | `{}` | +| `prometheusRule.labels` | Additional labels for the PrometheusRule | `{}` | +| `prometheusRule.rules` | List of rules for Prometheus | `[]` | +| `autoscaling.enabled` | Enable creation of a HorizontalPodAutoscaler resource | `false` | +| `autoscaling.labels` | Additional labels for the HorizontalPodAutoscaler resource | `{}` | +| `autoscaling.minReplicas` | The minimum number of Pods when autoscaling is enabled | `3` | +| `autoscaling.maxReplicas` | The maximum number of Pods when autoscaling is enabled | `10` | +| `autoscaling.metrics` | The metrics configuration for the HorizontalPodAutoscaler | `[{"resource":{"name":"cpu","target":{"averageUtilization":80,"type":"Utilization"}},"type":"Resource"}]` | +| `autoscaling.behavior` | The scaling policy configuration for the HorizontalPodAutoscaler | 
`{"scaleDown":{"policies":[{"periodSeconds":300,"type":"Pods","value":1}],"stabilizationWindowSeconds":300}}` | +| `test.enabled` | If `true`, test resources are created | `false` | +| `test.image.repository` | The image for the test Pod | `docker.io/unguiculus/docker-python3-phantomjs-selenium` | +| `test.image.tag` | The tag for the test Pod image | `v1` | +| `test.image.pullPolicy` | The image pull policy for the test Pod image | `IfNotPresent` | +| `test.podSecurityContext` | SecurityContext for the entire test Pod | `{"fsGroup":1000}` | +| `test.securityContext` | SecurityContext for the test container | `{"runAsNonRoot":true,"runAsUser":1000}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --set replicas=1 +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. For example: + +```console +$ helm install keycloak codecentric/keycloak -n keycloak --values values.yaml +``` + +The chart offers great flexibility. +It can be configured to work with the official Keycloak Docker image but any custom image can be used as well. + +For the official Docker image, please check its configuration at https://github.com/keycloak/keycloak-containers/tree/master/server. + +### Usage of the `tpl` Function + +The `tpl` function allows us to pass string values from `values.yaml` through the templating engine. +It is used for the following values: + +* `extraInitContainers` +* `extraContainers` +* `extraEnv` +* `extraEnvFrom` +* `affinity` +* `extraVolumeMounts` +* `extraVolumes` +* `livenessProbe` +* `readinessProbe` + +Additionally, custom labels and annotations can be set on various resources, the values of which are passed through `tpl` as well. + +It is important that these values be configured as strings. +Otherwise, installation will fail. 
+See example for Google Cloud Proxy or default affinity configuration in `values.yaml`. + +### JVM Settings + +Keycloak sets the following system properties by default: +`-Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS -Djava.awt.headless=true` + +You can override these by setting the `JAVA_OPTS` environment variable. +Make sure you configure container support. +This allows you to only configure memory using Kubernetes resources and the JVM will automatically adapt. + +```yaml +extraEnv: | + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true +``` + +### Database Setup + +By default, Bitnami's [PostgreSQL](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) chart is deployed and used as database. +Please refer to this chart for additional PostgreSQL configuration options. + +#### Using an External Database + +The Keycloak Docker image supports various database types. +Configuration happens in a generic manner. + +##### Using a Secret Managed by the Chart + +The following examples uses a PostgreSQL database with a secret that is managed by the Helm chart. + +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + +extraEnvFrom: | + - secretRef: + name: '{{ include "keycloak.fullname" . }}-db' + +secrets: + db: + stringData: + DB_USER: '{{ .Values.dbUser }}' + DB_PASSWORD: '{{ .Values.dbPassword }}' +``` + +`dbUser` and `dbPassword` are custom values you'd then specify on the commandline using `--set-string`. + +##### Using an Existing Secret + +The following examples uses a PostgreSQL database with a secret. +Username and password are mounted as files. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: mypostgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: mydb + - name: DB_USER_FILE + value: /secrets/db-creds/user + - name: DB_PASSWORD_FILE + value: /secrets/db-creds/password + +extraVolumeMounts: | + - name: db-creds + mountPath: /secrets/db-creds + readOnly: true + +extraVolumes: | + - name: db-creds + secret: + secretName: keycloak-db-creds +``` + +### Creating a Keycloak Admin User + +The Keycloak Docker image supports creating an initial admin user. +It must be configured via environment variables: + +* `KEYCLOAK_USER` or `KEYCLOAK_USER_FILE` +* `KEYCLOAK_PASSWORD` or `KEYCLOAK_PASSWORD_FILE` + +Please refer to the section on database configuration for how to configure a secret for this. + +### High Availability and Clustering + +For high availability, Keycloak must be run with multiple replicas (`replicas > 1`). +The chart has a helper template (`keycloak.serviceDnsName`) that creates the DNS name based on the headless service. + +#### DNS_PING Service Discovery + +JGroups discovery via DNS_PING can be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . }}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +#### KUBE_PING Service Discovery + +Recent versions of Keycloak include a new Kubernetes native [KUBE_PING](https://github.com/jgroups-extras/jgroups-kubernetes) service discovery protocol. +This requires a little more configuration than DNS_PING but can easily be achieved with the Helm chart. 
+ +As with DNS_PING some environment variables must be configured as follows: + +```yaml +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" +``` + +However, the Keycloak Pods must also get RBAC permissions to `get` and `list` Pods in the namespace which can be configured as follows: + +```yaml +rbac: + create: true + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +``` + +#### Autoscaling + +Due to the caches in Keycloak only replicating to a few nodes (two in the example configuration above) and the limited controls around autoscaling built into Kubernetes, it has historically been problematic to autoscale Keycloak. +However, in Kubernetes 1.18 [additional controls were introduced](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior) which make it possible to scale down in a more controlled manner. + +The example autoscaling configuration in the values file scales from three up to a maximum of ten Pods using CPU utilization as the metric. Scaling up is done as quickly as required but scaling down is done at a maximum rate of one Pod per five minutes. + +Autoscaling can be enabled as follows: + +```yaml +autoscaling: + enabled: true +``` + +KUBE_PING service discovery seems to be the most reliable mechanism to use when enabling autoscaling, due to being faster than DNS_PING at detecting changes in the cluster. 
+ +### Running Keycloak Behind a Reverse Proxy + +When running Keycloak behind a reverse proxy, which is the case when using an ingress controller, +proxy address forwarding must be enabled as follows: + +```yaml +extraEnv: | + - name: PROXY_ADDRESS_FORWARDING + value: "true" +``` + +### Providing a Custom Theme + +One option is certainly to provide a custom Keycloak image that includes the theme. +However, if you prefer to stick with the official Keycloak image, you can use an init container as theme provider. + +Create your own theme and package it up into a Docker image. + +```docker +FROM busybox +COPY mytheme /mytheme +``` + +In combination with an `emptyDir` that is shared with the Keycloak container, configure an init container that runs your theme image and copies the theme over to the right place where Keycloak will pick it up automatically. + +```yaml +extraInitContainers: | + - name: theme-provider + image: myuser/mytheme:1 + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes/mytheme + +extraVolumes: | + - name: theme + emptyDir: {} +``` + +### Setting a Custom Realm + +A realm can be added by creating a secret or configmap for the realm json file and then supplying this into the chart. +It can be mounted using `extraVolumeMounts` and then referenced as environment variable `KEYCLOAK_IMPORT`. 
+First we need to create a Secret from the realm JSON file using `kubectl create secret generic realm-secret --from-file=realm.json` which we need to reference in `values.yaml`: + +```yaml +extraVolumes: | + - name: realm-secret + secret: + secretName: realm-secret + +extraVolumeMounts: | + - name: realm-secret + mountPath: "/realm/" + readOnly: true + +extraEnv: | + - name: KEYCLOAK_IMPORT + value: /realm/realm.json +``` + +Alternatively, the realm file could be added to a custom image. + +After startup the web admin console for the realm should be available on the path /auth/admin/\/console/. + +### Using Google Cloud SQL Proxy + +Depending on your environment you may need a local proxy to connect to the database. +This is, e. g., the case for Google Kubernetes Engine when using Google Cloud SQL. +Create the secret for the credentials as documented [here](https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine) and configure the proxy as a sidecar. + +Because `extraContainers` is a string that is passed through the `tpl` function, it is possible to create custom values and use them in the string. 
+ +```yaml +postgresql: + # Disable PostgreSQL dependency + enabled: false + +# Custom values for Google Cloud SQL +cloudsql: + project: my-project + region: europe-west1 + instance: my-instance + +extraContainers: | + - name: cloudsql-proxy + image: gcr.io/cloudsql-docker/gce-proxy:1.17 + command: + - /cloud_sql_proxy + args: + - -instances={{ .Values.cloudsql.project }}:{{ .Values.cloudsql.region }}:{{ .Values.cloudsql.instance }}=tcp:5432 + - -credential_file=/secrets/cloudsql/credentials.json + volumeMounts: + - name: cloudsql-creds + mountPath: /secrets/cloudsql + readOnly: true + +extraVolumes: | + - name: cloudsql-creds + secret: + secretName: cloudsql-instance-credentials + +extraEnv: | + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: "127.0.0.1" + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: postgres + - name: DB_USER + value: myuser + - name: DB_PASSWORD + value: mypassword +``` + +### Changing the Context Path + +By default, Keycloak is served under context `/auth`. 
+This can be changed as follows: + +```yaml +contextPath: mycontext + +startupScripts: + # cli script that reconfigures WildFly + contextPath.cli: | + embed-server --server-config=standalone-ha.xml --std-out=echo + batch + {{- if ne .Values.contextPath "auth" }} + /subsystem=keycloak-server/:write-attribute(name=web-context,value={{ if eq .Values.contextPath "" }}/{{ else }}{{ .Values.contextPath }}{{ end }}) + {{- if eq .Values.contextPath "" }} + /subsystem=undertow/server=default-server/host=default-host:write-attribute(name=default-web-module,value=keycloak-server.war) + {{- end }} + {{- end }} + run-batch + stop-embedded-server + +livenessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +readinessProbe: | + httpGet: + path: {{ if ne .Values.contextPath "" }}/{{ .Values.contextPath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +The above YAML references introduces the custom value `contextPath` which is possible because `startupScripts`, `livenessProbe`, and `readinessProbe` are templated using the `tpl` function. +Note that it must not start with a slash. +Alternatively, you may supply it via CLI flag: + +```console +--set-string contextPath=mycontext +``` + +### Prometheus Metrics Support + +#### WildFly Metrics + +WildFly can expose metrics on the management port. +In order to achieve this, the environment variable `KEYCLOAK_STATISTICS` must be set. + +```yaml +extraEnv: | + - name: KEYCLOAK_STATISTICS + value: all +``` + +Add a ServiceMonitor if using prometheus-operator: + +```yaml +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Checkout `values.yaml` for customizing the ServiceMonitor and for adding custom Prometheus rules. 
+ +Add annotations if you don't use prometheus-operator: + +```yaml +service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9990" +``` + +#### Keycloak Metrics SPI + +Optionally, it is possible to add [Keycloak Metrics SPI](https://github.com/aerogear/keycloak-metrics-spi) via init container. + +A separate `ServiceMonitor` can be enabled to scrape metrics from the SPI: + +```yaml +extraServiceMonitor: + # If `true`, an additional ServiceMonitor resource for the prometheus-operator is created + enabled: true +``` + +Check out `values.yaml` for customizing this ServiceMonitor. + +Note that the metrics endpoint is exposed on the HTTP port. +You may want to restrict access to it in your ingress controller configuration. +For ingress-nginx, this could be done as follows: + +```yaml +annotations: + nginx.ingress.kubernetes.io/server-snippet: | + location ~* /auth/realms/[^/]+/metrics { + return 403; + } +``` + +## Why StatefulSet? + +The chart sets node identifiers to the system property `jboss.node.name` which is in fact the pod name. +Node identifiers must not be longer than 23 characters. +This can be problematic because pod names are quite long. +We would have to truncate the chart's fullname to six characters because pods get a 17-character suffix (e. g. `-697f8b7655-mf5ht`). +Using a StatefulSet allows us to truncate to 20 characters leaving room for up to 99 replicas, which is much better. +Additionally, we get stable values for `jboss.node.name` which can be advantageous for cluster discovery. +The headless service that governs the StatefulSet is used for DNS discovery via DNS_PING. + +## Upgrading + +### From chart < 10.0.0 + +* Keycloak is updated to 12.0.4 + +The upgrade should be seamless. +No special care has to be taken. + +### From chart versions < 9.0.0 + +The Keycloak chart received a major facelift and, thus, comes with breaking changes. +Opinionated stuff and things that are now baked into Keycloak's Docker image were removed. 
+Configuration is more generic making it easier to use custom Docker images that are configured differently than the official one. + +* Values are no longer nested under `keycloak`. +* Besides setting the node identifier, no CLI changes are performed out of the box +* Environment variables for the PostgreSQL dependency are set automatically if enabled. + Otherwise, no environment variables are set by default. +* Optionally enables creating RBAC resources with configurable rules (e. g. for KUBE_PING) +* PostgreSQL chart dependency is updated to 9.1.1 + +### From chart versions < 8.0.0 + +* Keycloak is updated to 10.0.0 +* PostgreSQL chart dependency is updated to 8.9.5 + +The upgrade should be seamless. +No special care has to be taken. + +### From chart versions < 7.0.0 + +Version 7.0.0 update breaks backwards-compatibility with the existing `keycloak.persistence.existingSecret` scheme. + +#### Changes in Configuring Database Credentials from an Existing Secret + +Both `DB_USER` and `DB_PASS` are always read from a Kubernetes Secret. +This is a requirement if you are provisioning database credentials dynamically - either via an Operator or some secret-management engine. + +The variable referencing the password key name has been renamed from `keycloak.persistence.existingSecretKey` to `keycloak.persistence.existingSecretPasswordKey` + +A new, optional variable for referencing the username key name for populating the `DB_USER` env has been added: +`keycloak.persistence.existingSecretUsernameKey`. + +If `keycloak.persistence.existingSecret` is left unset, a new Secret will be provisioned populated with the `dbUser` and `dbPassword` Helm variables. + +###### Example configuration: +```yaml +keycloak: + persistence: + existingSecret: keycloak-provisioned-db-credentials + existingSecretPasswordKey: PGPASSWORD + existingSecretUsernameKey: PGUSER + ... 
+``` +### From chart versions < 6.0.0 + +#### Changes in Probe Configuration + +Now both readiness and liveness probes are configured as strings that are then passed through the `tpl` function. +This allows for greater customizability of the readiness and liveness probes. + +The defaults are unchanged, but since 6.0.0 configured as follows: + +```yaml + livenessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + readinessProbe: | + httpGet: + path: {{ if ne .Values.keycloak.basepath "" }}/{{ .Values.keycloak.basepath }}{{ end }}/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 +``` + +#### Changes in Existing Secret Configuration + +This can be useful if you create a secret in a parent chart and want to reference that secret. +Applies to `keycloak.existingSecret` and `keycloak.persistence.existingSecret`. + +_`values.yaml` of parent chart:_ +```yaml +keycloak: + keycloak: + existingSecret: '{{ .Release.Name }}-keycloak-secret' +``` + +#### HTTPS Port Added + +The HTTPS port was added to the pod and to the services. +As a result, service ports are now configured differently. + + +### From chart versions < 5.0.0 + +Version 5.0.0 is a major update. + +* The chart now follows the new Kubernetes label recommendations: +https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +* Several changes to the StatefulSet render an out-of-the-box upgrade impossible because StatefulSets only allow updates to a limited set of fields +* The chart uses the new support for running scripts at startup that has been added to Keycloak's Docker image. +If you use this feature, you will have to adjust your configuration + +However, with the following manual steps an automatic upgrade is still possible: + +1. Adjust chart configuration as necessary (e. g. startup scripts) +1. 
Perform a non-cascading deletion of the StatefulSet which keeps the pods running +1. Add the new labels to the pods +1. Run `helm upgrade` + +Use a script like the following to add labels and to delete the StatefulSet: + +```console +#!/bin/sh + +release= +namespace= + +kubectl delete statefulset -n "$namespace" -l app=keycloak -l release="$release" --cascade=false + +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/name=keycloak +kubectl label pod -n "$namespace" -l app=keycloak -l release="$release" app.kubernetes.io/instance="$release" +``` + +**NOTE:** Version 5.0.0 also updates the Postgresql dependency which has received a major upgrade as well. +In case you use this dependency, the database must be upgraded first. +Please refer to the Postgresql chart's upgrading section in its README for instructions. diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/.helmignore b/roles/cmoa_install/files/04-keycloak/charts/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/Chart.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/Chart.yaml new file mode 100644 index 0000000..48d8f2f --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.8.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 9.1.1 diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/README.md b/roles/cmoa_install/files/04-keycloak/charts/postgresql/README.md new file mode 100644 index 0000000..c84cc7b --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/README.md @@ -0,0 +1,625 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+ +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR; + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following table lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | 
Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match against the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slave replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different from `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be used to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case `postgres` is the admin username). 
| _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node labels 
for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | 
`true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. 
|`nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List |`nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. 
+ +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. 
If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the certificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `securityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. 
+ +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. 
In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please convert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. 
Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. 
Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. 
It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. 
Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinity` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After running the above command you should be prompted for a password, this password is the previous chart password (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. 
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 0000000..b4d8828 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 0.3.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: http://www.bitnami.com/ +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +version: 0.3.1 diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/README.md b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/README.md new file mode 100644 index 0000000..ab50967 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/README.md @@ -0,0 +1,228 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR; + +```yaml +dependencies: + - name: common + version: 0.1.0 + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +**Names** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +**Images** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $` | + +**Labels** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +**Storage** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +**TplValues** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frecuently is the chart context `$` or `.` | + +**Capabilities** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +**Warnings** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +**Secrets** + +| Helper identifier | Description | Expected Input | +|---------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. 
+ example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +**Example of use** + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possiblity of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +## Notable changes + +N/A diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..c0ea2c7 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..ee6673a --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,44 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- 
printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" $) }} +*/}} +{{- define "common.images.pullSecrets" -}} +{{- if .global }} +{{- if .global.imagePullSecrets }} +imagePullSecrets: + {{- range .global.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- else }} +{{- $pullSecrets := list }} +{{- range .images }} + {{- if .pullSecrets }} + {{- $pullSecrets = append $pullSecrets .pullSecrets }} + {{- end }} +{{- end }} +{{- if $pullSecrets }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..adf2a74 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..d6165a2 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. 
+ +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = cat $name .defaultNameSuffix -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..9ecdc93 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 0000000..a936299 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,4 @@ +commonAnnotations: + helm.sh/hook: "pre-install, pre-upgrade" + helm.sh/hook-weight: "-1" + diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 0000000..347d3b4 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/README.md b/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/README.md new file mode 100644 index 0000000..1813a2f --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. 
diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md b/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md new file mode 100644 index 0000000..184c187 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..cba3809 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. 
\ No newline at end of file diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.lock b/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.lock new file mode 100644 index 0000000..1069b62 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.3.1 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-07-15T00:56:02.067804177Z" diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.yaml new file mode 100644 index 0000000..868eee6 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami \ No newline at end of file diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..6dec604 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,54 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.imxc.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.imxc.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . 
}} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace imxc {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace imxc --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace imxc -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . 
) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace imxc -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace imxc {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace imxc svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..a7008a1 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,494 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- 
.Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
+-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic. +Also, we cannot use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . 
| quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. 
+*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml new file mode 100644 index 0000000..b29ef60 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 0000000..f21a976 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and 
(or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 0000000..6637867 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . 
| indent 2 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 0000000..6b7a317 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 0000000..b993c99 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 0000000..2a7b372 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..da0b3ab --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 0000000..b0c41b1 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled 
.Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/pv.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/pv.yaml new file mode 100644 index 0000000..ddd7d7c --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/pv.yaml @@ -0,0 +1,27 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: keycloak-saas +spec: + storageClassName: manual + capacity: + storage: 8Gi + accessModes: + - ReadWriteOnce + #- ReadWriteMany + hostPath: + #path: "/home/keycloak/keycloak" + path: /mnt/keycloak-postgresql + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + #- imxc-worker1 + - {{ .Values.node.affinity }} + claimRef: + name: data-keycloak-saas-postgresql-0 + #namespace: auth + diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/role.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/role.yaml new file mode 100644 index 0000000..6d3cf50 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 0000000..b7daa2a --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: imxc +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..c93dbe0 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . 
}} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..17f7ff3 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 0000000..3e643e1 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - imxc + selector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 0000000..a712a03 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,340 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . 
}} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if 
.Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 0000000..35c6293 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,510 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "postgresql.tplValue" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . 
}} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if 
.Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "postgresql.tplValue" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: 
{{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + #claimName: {{ tpl . 
$ }} + claimName: data-keycloak-saas-postgresql-0 +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 0000000..4913157 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 0000000..885c7bb --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: slave +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc.yaml new file mode 100644 index 0000000..e9fc504 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/values-production.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/values-production.yaml new file mode 100644 index 0000000..a43670f --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/values-production.yaml @@ -0,0 +1,591 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: 10.10.31.243:5000 # docker.io + repository: postgresql # bitnami/postgresql + tag: 11.8.0-debian-10-r61 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: 10.10.31.243:5000 # docker.io + repository: minideb # bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PostgreSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer.
+ ## This option is ignored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master
Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: 
{} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.schema.json b/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.schema.json new file mode 100644 index 0000000..7b5e2ef --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": 
"http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.yaml b/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.yaml new file mode 100644 index 
0000000..5f831ef --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/charts/postgresql/values.yaml @@ -0,0 +1,604 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + #registry: cdm-dev.exem-oss.org/keycloak + registry: 10.10.31.243:5000/keycloak # registry.openstacklocal:5000/keycloak + repository: keycloak-postgresql + tag: 11.8.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + #registry: cdm-dev.exem-oss.org + registry: 10.10.31.243:5000 # registry.openstacklocal:5000 + repository: minideb # keycloak/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data +#postgresqlDataDir: /var/lib/postgresql/data/pgdata + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+##
+# terminationGracePeriodSeconds: 30
+
+## LDAP configuration
+##
+ldap:
+  enabled: false
+  url: ""
+  server: ""
+  port: ""
+  prefix: ""
+  suffix: ""
+  baseDN: ""
+  bindDN: ""
+  bind_password:
+  search_attr: ""
+  search_filter: ""
+  scheme: ""
+  tls: false
+
+## PostgreSQL service configuration
+service:
+  ## PostgreSQL service type
+  type: ClusterIP
+  # clusterIP: None
+  port: 5432
+
+  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  ##
+  # nodePort:
+
+  ## Provide any additional annotations which may be required. Evaluated as a template.
+  ##
+  annotations: {}
+  ## Set the LoadBalancer service type to internal only.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+  ##
+  # loadBalancerIP:
+
+  ## Load Balancer sources. Evaluated as a template.
+  ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ##
+  # loadBalancerSourceRanges:
+  # - 10.10.10.0/24
+
+## Start master and slave(s) pod(s) without limitations on shm memory.
+## By default docker and containerd (and possibly other container runtimes)
+## limit `/dev/shm` to `64M` (see e.g. the
+## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
+## [containerd issue](https://github.com/containerd/containerd/issues/3654),
+## which could be not enough if PostgreSQL uses parallel workers heavily.
+##
+shmVolume:
+  ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove
+  ## this limitation.
+  ##
+  enabled: true
+  ## Set to `true` to `chmod 777 /dev/shm` on an initContainer.
+  ## This option is ignored if `volumePermissions.enabled` is `false`
+  ##
+  chmod:
+    enabled: true
+
+## PostgreSQL data Persistent Volume Storage Class
+## If defined, storageClassName:
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If undefined (the default) or set to null, no storageClassName spec is
+## set, choosing the default provisioner. (gp2 on AWS, standard on
+## GKE, AWS & OpenStack)
+##
+persistence:
+  enabled: true
+  ## A manually managed Persistent Volume and Claim
+  ## If defined, PVC must be created manually before volume will be bound
+  ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
+  ##
+  # existingClaim:
+
+  ## The path the volume will be mounted at, useful when using different
+  ## PostgreSQL images.
+  ##
+  mountPath: /bitnami/postgresql
+
+  ## The subdirectory of the volume to mount to, useful in dev environments
+  ## and one PV for multiple services.
+  ##
+  subPath: ""
+
+  storageClass: ""
+  accessModes:
+    - ReadWriteOnce
+  size: 8Gi
+  annotations: {}
+
+## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+updateStrategy:
+  type: RollingUpdate
+
+##
+## PostgreSQL Master parameters
+##
+master:
+  ## Node, affinity, tolerations, and priorityclass settings for pod assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
+  nodeSelector: {}
+  affinity: {}
+  tolerations: []
+  labels: {}
+  annotations: {}
+  podLabels: {}
+  podAnnotations: {}
+  priorityClassName: ""
+  ## Extra init containers
+  ## Example
+ 
## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: IfNotPresent + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + 
requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: 10.10.31.243:5000 # docker.io + repository: postgres-exporter # bitnami/postgres-exporter + tag: 0.8.0-debian-10-r166 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +#persistentVolume nodeAffinity Value Require this value +node: + affinity: imxc-worker1 diff --git a/roles/cmoa_install/files/04-keycloak/ci/h2-values.yaml b/roles/cmoa_install/files/04-keycloak/ci/h2-values.yaml new file mode 100644 index 0000000..10d1705 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/ci/h2-values.yaml 
@@ -0,0 +1,38 @@ +extraEnv: | + - name: DB_VENDOR + value: h2 + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + annotations: + my-test-annotation: Test secret for {{ include "keycloak.fullname" . }} + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: false + +test: + enabled: true diff --git a/roles/cmoa_install/files/04-keycloak/ci/postgres-ha-values.yaml b/roles/cmoa_install/files/04-keycloak/ci/postgres-ha-values.yaml new file mode 100644 index 0000000..e92c2c7 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/ci/postgres-ha-values.yaml @@ -0,0 +1,73 @@ +replicas: 2 + +podLabels: + test-label: test-label-value + +podAnnotations: + test-annotation: test-annotation-value-{{ .Release.Name }} + test-int-annotation: "12345" + +startupScripts: + hello.sh: | + #!/bin/sh + + echo '********************************************************************************' + echo '* *' + echo '* Hello from my startup script! *' + echo '* *' + echo '********************************************************************************' + +lifecycleHooks: | + postStart: + exec: + command: + - /bin/sh + - -c + - echo 'Hello from lifecycle hook!' + +extraEnv: | + - name: JGROUPS_DISCOVERY_PROTOCOL + value: dns.DNS_PING + - name: JGROUPS_DISCOVERY_PROPERTIES + value: 'dns_query={{ include "keycloak.serviceDnsName" . 
}}' + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + - name: KEYCLOAK_USER_FILE + value: /secrets/admin-creds/user + - name: KEYCLOAK_PASSWORD_FILE + value: /secrets/admin-creds/password + - name: KEYCLOAK_STATISTICS + value: all + - name: JAVA_OPTS + value: >- + -XX:+UseContainerSupport + -XX:MaxRAMPercentage=50.0 + -Djava.net.preferIPv4Stack=true + -Djboss.modules.system.pkgs=$JBOSS_MODULES_SYSTEM_PKGS + -Djava.awt.headless=true + +secrets: + admin-creds: + stringData: + user: admin + password: secret + +extraVolumeMounts: | + - name: admin-creds + mountPath: /secrets/admin-creds + readOnly: true + +extraVolumes: | + - name: admin-creds + secret: + secretName: '{{ include "keycloak.fullname" . }}-admin-creds' + +postgresql: + enabled: true + persistence: + enabled: true + +test: + enabled: true diff --git a/roles/cmoa_install/files/04-keycloak/requirements.lock b/roles/cmoa_install/files/04-keycloak/requirements.lock new file mode 100644 index 0000000..4231a57 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.1 +digest: sha256:33ee9e6caa9e519633071fd71aedd9de7906b9a9d7fb629eb814d9f72bb8d68e +generated: "2020-07-24T07:40:55.78753+02:00" diff --git a/roles/cmoa_install/files/04-keycloak/requirements.yaml b/roles/cmoa_install/files/04-keycloak/requirements.yaml new file mode 100644 index 0000000..f3409a3 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: postgresql + version: 9.1.1 + repository: https://charts.bitnami.com/bitnami + condition: postgresql.enabled diff --git a/roles/cmoa_install/files/04-keycloak/scripts/keycloak.cli b/roles/cmoa_install/files/04-keycloak/scripts/keycloak.cli new file mode 100644 index 0000000..1469963 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/scripts/keycloak.cli @@ 
-0,0 +1,13 @@
+embed-server --server-config=standalone-ha.xml --std-out=echo
+batch
+
+echo Configuring node identifier
+
+## Sets the node identifier to the node name (= pod name). Node identifiers have to be unique. They can have a
+## maximum length of 23 characters. Thus, the chart's fullname template truncates its length accordingly.
+/subsystem=transactions:write-attribute(name=node-identifier, value=${jboss.node.name})
+
+echo Finished configuring node identifier
+
+run-batch
+stop-embedded-server
diff --git a/roles/cmoa_install/files/04-keycloak/templates/NOTES.txt b/roles/cmoa_install/files/04-keycloak/templates/NOTES.txt
new file mode 100644
index 0000000..e76e064
--- /dev/null
+++ b/roles/cmoa_install/files/04-keycloak/templates/NOTES.txt
@@ -0,0 +1,61 @@
+***********************************************************************
+*                                                                     *
+*                Keycloak Helm Chart by codecentric AG                *
+*                                                                     *
+***********************************************************************
+
+{{- if .Values.ingress.enabled }}
+
+Keycloak was installed with an Ingress and can be reached at the following URL(s):
+{{ range $unused, $rule := .Values.ingress.rules }}
+  {{- range $rule.paths }}
+  - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $rule.host }}{{ . }}
+  {{- end }}
+{{- end }}
+
+{{- else if eq "NodePort" .Values.service.type }}
+
+Keycloak was installed with a Service of type NodePort.
+{{ if .Values.service.httpNodePort }}
+Get its HTTP URL with the following commands:
+
+export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . 
}}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"http\" }}{{ .nodePort }}{{ end }}{{ end }}"}}')
+export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}")
+echo "http://$NODE_IP:$NODE_PORT"
+{{- end }}
+{{ if .Values.service.httpsNodePort }}
+Get its HTTPS URL with the following commands:
+
+export NODE_PORT=$(kubectl get --namespace imxc service {{ include "keycloak.fullname" . }}-http --template='{{"{{ range .spec.ports }}{{ if eq .name \"https\" }}{{ .nodePort }}{{ end }}{{ end }}"}}')
+export NODE_IP=$(kubectl get nodes --namespace imxc -o jsonpath="{.items[0].status.addresses[0].address}")
+echo "https://$NODE_IP:$NODE_PORT"
+{{- end }}
+
+{{- else if eq "LoadBalancer" .Values.service.type }}
+
+Keycloak was installed with a Service of type LoadBalancer
+
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+      You can watch the status of it by running 'kubectl get --namespace imxc service -w {{ include "keycloak.fullname" . }}'
+
+Get its HTTP URL with the following commands:
+
+export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+echo "http://$SERVICE_IP:{{ .Values.service.httpPort }}"
+
+Get its HTTPS URL with the following commands:
+
+export SERVICE_IP=$(kubectl get service --namespace imxc {{ include "keycloak.fullname" . }}-http --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+echo "https://$SERVICE_IP:{{ .Values.service.httpsPort }}"
+
+{{- else if eq "ClusterIP" .Values.service.type }}
+
+Keycloak was installed with a Service of type ClusterIP
+
+Create a port-forwarding with the following commands:
+
+export POD_NAME=$(kubectl get pods --namespace imxc -l "app.kubernetes.io/name={{ include "keycloak.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o name) +echo "Visit http://127.0.0.1:8080 to use your application" +kubectl --namespace imxc port-forward "$POD_NAME" 8080 + +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/_helpers.tpl b/roles/cmoa_install/files/04-keycloak/templates/_helpers.tpl new file mode 100644 index 0000000..d019e17 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/_helpers.tpl @@ -0,0 +1,87 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "keycloak.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate to 20 characters because this is used to set the node identifier in WildFly which is limited to +23 characters. This allows for a replica suffix for up to 99 replicas. +*/}} +{{- define "keycloak.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 20 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 20 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "keycloak.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "keycloak.labels" -}} +helm.sh/chart: {{ include "keycloak.chart" . }} +{{ include "keycloak.selectorLabels" . }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "keycloak.selectorLabels" -}} +app.kubernetes.io/name: {{ include "keycloak.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "keycloak.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "keycloak.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for the postgres requirement. +*/}} +{{- define "keycloak.postgresql.fullname" -}} +{{- $postgresContext := dict "Values" .Values.postgresql "Release" .Release "Chart" (dict "Name" "postgresql") -}} +{{ include "postgresql.fullname" $postgresContext }} +{{- end }} + +{{/* +Create the service DNS name. +*/}} +{{- define "keycloak.serviceDnsName" -}} +{{ include "keycloak.fullname" . }}-headless.imxc.svc.{{ .Values.clusterDomain }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "keycloak.ingressAPIVersion" -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- print "networking.k8s.io/v1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/templates/configmap-startup.yaml b/roles/cmoa_install/files/04-keycloak/templates/configmap-startup.yaml new file mode 100644 index 0000000..8fbb462 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/configmap-startup.yaml @@ -0,0 +1,14 @@ +{{- if .Values.startupScripts }} +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-startup + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} +data: + {{- range $key, $value := .Values.startupScripts }} + {{ $key }}: | + {{- tpl $value $ | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/templates/hpa.yaml b/roles/cmoa_install/files/04-keycloak/templates/hpa.yaml new file mode 100644 index 0000000..c772b76 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/hpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.autoscaling.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "keycloak.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- toYaml .Values.autoscaling.metrics | nindent 4 }} + behavior: + {{- toYaml .Values.autoscaling.behavior | nindent 4 }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/ingress.yaml b/roles/cmoa_install/files/04-keycloak/templates/ingress.yaml new file mode 100644 index 0000000..d749e24 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/ingress.yaml @@ -0,0 +1,104 @@ +{{- $ingress := .Values.ingress -}} +{{- if $ingress.enabled -}} +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- if $ingress.console.enabled }} +--- +apiVersion: {{ include "keycloak.ingressAPIVersion" . }} +kind: Ingress +metadata: + name: {{ include "keycloak.fullname" . }}-console + {{- with $ingress.console.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $ingress.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . $ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.console.rules }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }} + pathType: Prefix + backend: + service: + name: {{ include "keycloak.fullname" $ }}-http + port: + name: {{ $ingress.servicePort }} + {{- else }} + backend: + serviceName: {{ include "keycloak.fullname" $ }}-http + servicePort: {{ $ingress.servicePort }} + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/templates/networkpolicy.yaml b/roles/cmoa_install/files/04-keycloak/templates/networkpolicy.yaml new file mode 100644 index 0000000..5e7c7b6 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/networkpolicy.yaml @@ -0,0 +1,46 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "keycloak.fullname" . | quote }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.networkPolicy.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + ingress: + {{- with .Values.networkPolicy.extraFrom }} + - from: + {{- toYaml . | nindent 8 }} + ports: + - protocol: TCP + port: {{ $.Values.service.httpPort }} + - protocol: TCP + port: {{ $.Values.service.httpsPort }} + {{ range $.Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} + {{- end }} + - from: + - podSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . 
| nindent 14 }} + ports: + - protocol: TCP + port: {{ .Values.service.httpPort }} + - protocol: TCP + port: {{ .Values.service.httpsPort }} + - protocol: TCP + port: {{ .Values.service.httpManagementPort }} + {{ range .Values.extraPorts }} + - protocol: {{ default "TCP" .protocol }} + port: {{ .containerPort }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/poddisruptionbudget.yaml b/roles/cmoa_install/files/04-keycloak/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..39cc390 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/poddisruptionbudget.yaml @@ -0,0 +1,13 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/templates/prometheusrule.yaml b/roles/cmoa_install/files/04-keycloak/templates/prometheusrule.yaml new file mode 100644 index 0000000..69af5e7 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- with .Values.prometheusRule -}} +{{- if .enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "keycloak.fullname" $ }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "keycloak.fullname" $ }} + rules: + {{- toYaml .rules | nindent 8 }} +{{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/templates/rbac.yaml b/roles/cmoa_install/files/04-keycloak/templates/rbac.yaml new file mode 100644 index 0000000..9ca0a2b --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/rbac.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create .Values.rbac.rules }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +rules: + {{- toYaml .Values.rbac.rules | nindent 2 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "keycloak.fullname" . }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "keycloak.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "keycloak.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/route.yaml b/roles/cmoa_install/files/04-keycloak/templates/route.yaml new file mode 100644 index 0000000..9507d56 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/route.yaml @@ -0,0 +1,34 @@ +{{- $route := .Values.route -}} +{{- if $route.enabled -}} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with $route.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := $route.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: +{{- if $route.host }} + host: {{ tpl $route.host $ | quote }} +{{- end }} + path: {{ $route.path }} + port: + targetPort: http + to: + kind: Service + name: {{ include "keycloak.fullname" $ }}-http + weight: 100 + {{- if $route.tls.enabled }} + tls: + insecureEdgeTerminationPolicy: {{ $route.tls.insecureEdgeTerminationPolicy }} + termination: {{ $route.tls.termination }} + {{- end }} +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/templates/secrets.yaml b/roles/cmoa_install/files/04-keycloak/templates/secrets.yaml new file mode 100644 index 0000000..c1cb796 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/secrets.yaml @@ -0,0 +1,29 @@ +{{- range $nameSuffix, $values := .Values.secrets -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $nameSuffix }} + {{- with $values.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := $values.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +type: {{ default "Opaque" $values.type }} +{{- with $values.data }} +data: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with $values.stringData }} +stringData: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 2 }} + {{- end }} +{{- end }} +--- +{{- end -}} diff --git a/roles/cmoa_install/files/04-keycloak/templates/service-headless.yaml b/roles/cmoa_install/files/04-keycloak/templates/service-headless.yaml new file mode 100644 index 0000000..0c22ec9 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/service-headless.yaml @@ -0,0 +1,18 @@ +{{- $highAvailability := gt (int .Values.replicas) 1 -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-headless + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: headless +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + protocol: TCP + selector: + {{- include "keycloak.selectorLabels" . | nindent 4 }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/service-http.yaml b/roles/cmoa_install/files/04-keycloak/templates/service-http.yaml new file mode 100644 index 0000000..c4a1dc9 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/service-http.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "keycloak.fullname" . }}-http + {{- with .Values.service.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + {{- range $key, $value := .Values.service.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: http +spec: + type: {{ .Values.service.type }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq "LoadBalancer" .Values.service.type) .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- with .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: http + port: {{ .Values.service.httpPort }} + targetPort: http + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpNodePort }} + nodePort: {{ .Values.service.httpNodePort }} + {{- end }} + protocol: TCP + - name: https + port: {{ .Values.service.httpsPort }} + targetPort: https + {{- if and (or (eq "NodePort" .Values.service.type) (eq "LoadBalancer" .Values.service.type) ) .Values.service.httpsNodePort }} + nodePort: {{ .Values.service.httpsNodePort }} + {{- end }} + protocol: TCP + - name: http-management + port: {{ .Values.service.httpManagementPort }} + targetPort: http-management + {{- if and (eq "NodePort" .Values.service.type) .Values.service.httpManagementNodePort }} + nodePort: {{ .Values.service.httpManagementNodePort }} + {{- end }} + protocol: TCP + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "keycloak.selectorLabels" . 
| nindent 4 }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/serviceaccount.yaml b/roles/cmoa_install/files/04-keycloak/templates/serviceaccount.yaml new file mode 100644 index 0000000..1d8f3f0 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "keycloak.serviceAccountName" . }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.serviceAccount.labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +imagePullSecrets: + {{- toYaml .Values.serviceAccount.imagePullSecrets | nindent 4 }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/servicemonitor.yaml b/roles/cmoa_install/files/04-keycloak/templates/servicemonitor.yaml new file mode 100644 index 0000000..ba97f62 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- range $key, $serviceMonitor := dict "wildfly" .Values.serviceMonitor "extra" .Values.extraServiceMonitor }} +{{- with $serviceMonitor }} +{{- if .enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "keycloak.fullname" $ }}-{{ $key }} + {{- with .namespace }} + namespace: {{ . }} + {{- end }} + {{- with .annotations }} + annotations: + {{- range $key, $value := . 
}} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" $ | nindent 4 }} + {{- range $key, $value := .labels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + {{- with .namespaceSelector }} + namespaceSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "keycloak.selectorLabels" $ | nindent 6 }} + app.kubernetes.io/component: http + endpoints: + - port: {{ .port }} + path: {{ .path }} + interval: {{ .interval }} + scrapeTimeout: {{ .scrapeTimeout }} +{{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/statefulset.yaml b/roles/cmoa_install/files/04-keycloak/templates/statefulset.yaml new file mode 100644 index 0000000..8278986 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/statefulset.yaml @@ -0,0 +1,208 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "keycloak.fullname" . }} + {{- with .Values.statefulsetAnnotations }} + annotations: + {{- range $key, $value := . }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "keycloak.labels" . | nindent 4 }} + {{- range $key, $value := .Values.statefulsetLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 6 }} + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicas }} + {{- end }} + serviceName: {{ include "keycloak.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config-startup: {{ include (print .Template.BasePath "/configmap-startup.yaml") . | sha256sum }} + checksum/secrets: {{ tpl (toYaml .Values.secrets) . 
| sha256sum }} + {{- range $key, $value := .Values.podAnnotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + labels: + {{- include "keycloak.selectorLabels" . | nindent 8 }} + {{- if and .Values.postgresql.enabled (and .Values.postgresql.networkPolicy .Values.postgresql.networkPolicy.enabled) }} + {{ include "keycloak.postgresql.fullname" . }}-client: "true" + {{- end }} + {{- range $key, $value := .Values.podLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }} + {{- end }} + spec: + {{- if or .Values.postgresql.enabled .Values.extraInitContainers }} + initContainers: + {{- if .Values.postgresql.enabled }} + - name: pgchecker + image: "{{ .Values.pgchecker.image.repository }}:{{ .Values.pgchecker.image.tag }}" + imagePullPolicy: {{ .Values.pgchecker.image.pullPolicy }} + securityContext: + {{- toYaml .Values.pgchecker.securityContext | nindent 12 }} + command: + - sh + - -c + - | + echo 'Waiting for PostgreSQL to become ready...' + + until printf "." && nc -z -w 2 {{ include "keycloak.postgresql.fullname" . }} {{ .Values.postgresql.service.port }}; do + sleep 2; + done; + + echo 'PostgreSQL OK ✓' + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + resources: + {{- toYaml .Values.pgchecker.resources | nindent 12 }} + {{- end }} + {{- with .Values.extraInitContainers }} + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: keycloak + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + {{- toYaml .Values.command | nindent 12 }} + args: + {{- toYaml .Values.args | nindent 12 }} + {{- with .Values.lifecycleHooks }} + {{- tpl . 
$ | nindent 12 }} + {{- end }} + env: + - name: KEYCLOAK_USER + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_USERNAME + - name: KEYCLOAK_PASSWORD + value: "admin" + #valueFrom: + # secretKeyRef: + # name: keycloak-secret + # key: KEYCLOAK_MASTER_PASSWORD + {{- if .Values.postgresql.enabled }} + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: {{ include "keycloak.postgresql.fullname" . }} + - name: DB_PORT + value: {{ .Values.postgresql.service.port | quote }} + - name: DB_DATABASE + value: {{ .Values.postgresql.postgresqlDatabase | quote }} + - name: DB_USER + value: {{ .Values.postgresql.postgresqlUsername | quote }} + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.postgresql.fullname" . }} + key: postgresql-password + {{- end }} + {{- with .Values.extraEnv }} + {{- tpl . $ | nindent 12 }} + {{- end }} + envFrom: + {{- with .Values.extraEnvFrom }} + {{- tpl . $ | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: https + containerPort: 8443 + protocol: TCP + - name: http-management + containerPort: 9990 + protocol: TCP + {{- with .Values.extraPorts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- tpl . $ | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - mountPath: /opt/jboss/keycloak/themes/cloudmoa/ + name: themes-upper-directory + {{- range $key, $value := .Values.startupScripts }} + - name: startup + mountPath: "/opt/jboss/startup-scripts/{{ $key }}" + subPath: "{{ $key }}" + readOnly: true + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- tpl . $ | nindent 12 }} + {{- end }} + {{- with .Values.extraContainers }} + {{- tpl . 
$ | nindent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "keycloak.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- with .Values.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + restartPolicy: {{ .Values.restartPolicy }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: + - name: themes-upper-directory + hostPath: + path: /root/oci/infra-set/keycloak/keycloak_theme/ + type: DirectoryOrCreate + {{- with .Values.startupScripts }} + - name: startup + configMap: + name: {{ include "keycloak.fullname" $ }}-startup + defaultMode: 0555 + items: + {{- range $key, $value := . }} + - key: {{ $key }} + path: {{ $key }} + {{- end }} + {{- end }} + {{- with .Values.extraVolumes }} + {{- tpl . $ | nindent 8 }} + {{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/test/configmap-test.yaml b/roles/cmoa_install/files/04-keycloak/templates/test/configmap-test.yaml new file mode 100644 index 0000000..8dda781 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/test/configmap-test.yaml @@ -0,0 +1,50 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "keycloak.fullname" . }}-test + labels: + {{- include "keycloak.labels" . 
| nindent 4 }} + annotations: + helm.sh/hook: test + helm.sh/hook-delete-policy: hook-succeeded +data: + test.py: | + import os + from selenium import webdriver + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions + from urllib.parse import urlparse + + print('Creating PhantomJS driver...') + driver = webdriver.PhantomJS(service_log_path='/tmp/ghostdriver.log') + + base_url = 'http://{{ include "keycloak.fullname" . }}-http{{ if ne 80 (int .Values.service.httpPort) }}:{{ .Values.service.httpPort }}{{ end }}' + + print('Opening Keycloak...') + driver.get('{0}/auth/admin/'.format(base_url)) + + username = os.environ['KEYCLOAK_USER'] + password = os.environ['KEYCLOAK_PASSWORD'] + + username_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "username"))) + password_input = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "password"))) + login_button = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located((By.ID, "kc-login"))) + + print('Entering username...') + username_input.send_keys(username) + + print('Entering password...') + password_input.send_keys(password) + + print('Clicking login button...') + login_button.click() + + WebDriverWait(driver, 30).until(lambda driver: '/auth/admin/master/console/' in driver.current_url) + + print('Admin console visible. Login successful.') + + driver.quit() + + {{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/templates/test/pod-test.yaml b/roles/cmoa_install/files/04-keycloak/templates/test/pod-test.yaml new file mode 100644 index 0000000..5b166f2 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/templates/test/pod-test.yaml @@ -0,0 +1,43 @@ +{{- if .Values.test.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "keycloak.fullname" . 
}}-test + labels: + {{- include "keycloak.labels" . | nindent 4 }} + app.kubernetes.io/component: test + annotations: + helm.sh/hook: test +spec: + securityContext: + {{- toYaml .Values.test.podSecurityContext | nindent 4 }} + containers: + - name: keycloak-test + image: "{{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}" + imagePullPolicy: {{ .Values.test.image.pullPolicy }} + securityContext: + {{- toYaml .Values.test.securityContext | nindent 8 }} + command: + - python3 + args: + - /tests/test.py + env: + - name: KEYCLOAK_USER + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: user + - name: KEYCLOAK_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keycloak.fullname" . }}-admin-creds + key: password + volumeMounts: + - name: tests + mountPath: /tests + volumes: + - name: tests + configMap: + name: {{ include "keycloak.fullname" . }}-test + restartPolicy: Never +{{- end }} diff --git a/roles/cmoa_install/files/04-keycloak/values.schema.json b/roles/cmoa_install/files/04-keycloak/values.schema.json new file mode 100644 index 0000000..47c2aa3 --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/values.schema.json @@ -0,0 +1,434 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "required": [ + "image" + ], + "definitions": { + "image": { + "type": "object", + "required": [ + "repository", + "tag" + ], + "properties": { + "pullPolicy": { + "type": "string", + "pattern": "^(Always|Never|IfNotPresent)$" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + } + } + }, + "properties": { + "affinity": { + "type": "string" + }, + "args": { + "type": "array" + }, + "clusterDomain": { + "type": "string" + }, + "command": { + "type": "array" + }, + "enableServiceLinks": { + "type": "boolean" + }, + 
"extraContainers": { + "type": "string" + }, + "extraEnv": { + "type": "string" + }, + "extraEnvFrom": { + "type": "string" + }, + "extraInitContainers": { + "type": "string" + }, + "extraPorts": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "string" + }, + "extraVolumes": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "hostAliases": { + "type": "array" + }, + "image": { + "$ref": "#/definitions/image" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "ingress": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "servicePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ] + }, + "tls": { + "type": "array", + "items": { + "type": "object", + "properties": { + "hosts": { + "type": "array", + "items": { + "items": { + "type": "string" + } + }, + "secretName": { + "type": "string" + } + } + } + } + } + }, + "lifecycleHooks": { + "type": "string" + }, + "livenessProbe": { + "type": "string" + }, + "nameOverride": { + "type": "string" + }, + "nodeSelector": { + "type": "object" + }, + "pgchecker": { + "type": "object", + "properties": { + "image": { + "$ref": "#/definitions/image" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "securityContext": { + "type": "object" + } + } + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "type": "object" + }, + 
"podLabels": { + "type": "object" + }, + "podManagementPolicy": { + "type": "string" + }, + "podSecurityContext": { + "type": "object" + }, + "postgresql": { + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "prometheusRule": { + "type": "object" + }, + "serviceMonitor": { + "type": "object" + }, + "extraServiceMonitor": { + "type": "object" + }, + "readinessProbe": { + "type": "string" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "restartPolicy": { + "type": "string" + }, + "route": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "host": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "path": { + "type": "string" + }, + "tls": { + "type": "object" + } + } + }, + "secrets": { + "type": "object" + }, + "securityContext": { + "type": "object" + }, + "service": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "extraPorts": { + "type": "array" + }, + "loadBalancerSourceRanges": { + "type": "array" + }, + "httpNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpPort": { + "type": "integer" + }, + "httpsNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpsPort": { + "type": "integer" + }, + "httpManagementNodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "httpManagementPort": { + "type": "integer" + }, + "labels": { + "type": "object" + }, + "nodePort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "type": { + "type": "string" + }, + "loadBalancerIP": { + "type": "string" + }, + "sessionAffinity": { + "type": "string" + }, + "sessionAffinityConfig": { + "type": "object" + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "create": { + "type": 
"boolean" + }, + "imagePullSecrets": { + "$ref": "#/definitions/imagePullSecrets" + }, + "labels": { + "type": "object" + }, + "name": { + "type": "string" + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "rules": { + "type": "array" + } + } + }, + "startupScripts": { + "type": "object" + }, + "statefulsetAnnotations": { + "type": "object" + }, + "statefulsetLabels": { + "type": "object" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "autoscaling": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "labels": { + "type": "object" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "metrics": { + "type": "array" + }, + "behavior": { + "type": "object" + } + } + }, + "test": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "image": { + "$ref": "#/definitions/image" + }, + "podSecurityContext": { + "type": "object" + }, + "securityContext": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array" + } + } + } +} diff --git a/roles/cmoa_install/files/04-keycloak/values.yaml b/roles/cmoa_install/files/04-keycloak/values.yaml new file mode 100644 index 0000000..a95521f --- /dev/null +++ b/roles/cmoa_install/files/04-keycloak/values.yaml @@ -0,0 +1,552 @@ +# Optionally override the fully qualified name +fullnameOverride: "imxc-keycloak" + +# Optionally override the name +nameOverride: "" + +# The number of replicas to create (has no effect if autoscaling enabled) +replicas: 2 + +image: + # The Keycloak image repository + #repository: cdm-dev.exem-oss.org/keycloak/keycloak + repository: 10.10.31.243:5000/cmoa3/keycloak + # Overrides the Keycloak image tag whose default is the chart version + tag: "11.0.1" + # The Keycloak image pull policy + pullPolicy: Always + +# Image pull secrets for the Pod +#imagePullSecrets: [] +# - name: myRegistrKeySecretName +imagePullSecrets: + - name: 
regcred + +# Mapping between IPs and hostnames that will be injected as entries in the Pod's hosts files +hostAliases: [] +# - ip: "1.2.3.4" +# hostnames: +# - "my.host.com" + +# Indicates whether information about services should be injected into Pod's environment variables, matching the syntax of Docker links +enableServiceLinks: true + +# Pod management policy. One of `Parallel` or `OrderedReady` +podManagementPolicy: Parallel + +# Pod restart policy. One of `Always`, `OnFailure`, or `Never` +restartPolicy: Always + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + # Additional annotations for the ServiceAccount + annotations: {} + # Additional labels for the ServiceAccount + labels: {} + # Image pull secrets that are attached to the ServiceAccount + #imagePullSecrets: [] + imagePullSecrets: + - name: regcred + +rbac: + create: true + rules: + # RBAC rules for KUBE_PING + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + +# SecurityContext for the entire Pod. Every container running in the Pod will inherit this SecurityContext. This might be relevant when other components of the environment inject additional containers into running Pods (service meshes are the most prominent example for this) +podSecurityContext: + fsGroup: 1000 + +# SecurityContext for the Keycloak container +securityContext: + runAsUser: 1000 + runAsNonRoot: true + +# Additional init containers, e. g. for providing custom themes +extraInitContainers: | + - name: theme-provider + image: 10.10.31.243:5000/cmoa3/theme-provider:latest + imagePullPolicy: IfNotPresent + command: + - sh + args: + - -c + - | + echo "Copying theme ..." + cp -R /mytheme/* /theme + volumeMounts: + - name: theme + mountPath: /theme + +#extraInitContainers: "" + +# Additional sidecar containers, e. g. 
for a database proxy, such as Google's cloudsql-proxy +extraContainers: "" + +# Lifecycle hooks for the Keycloak container +lifecycleHooks: | +# postStart: +# exec: +# command: +# - /bin/sh +# - -c +# - ls + +# Termination grace period in seconds for Keycloak shutdown. Clusters with a large cache might need to extend this to give Infinispan more time to rebalance +terminationGracePeriodSeconds: 60 + +# The internal Kubernetes cluster domain +clusterDomain: cluster.local + +## Overrides the default entrypoint of the Keycloak container +command: [] + +## Overrides the default args for the Keycloak container +#args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled", "-Dkeycloak.profile.feature.admin_fine_grained_authz=enabled"] +args: ["-Dkeycloak.profile.feature.scripts=enabled", "-Dkeycloak.profile.feature.upload_scripts=enabled"] + +# Additional environment variables for Keycloak +extraEnv: | + # HA settings + - name: PROXY_ADDRESS_FORWARDING + value: "true" + - name: JGROUPS_DISCOVERY_PROTOCOL + value: kubernetes.KUBE_PING + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CACHE_OWNERS_COUNT + value: "2" + - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + value: "2" + # postgresql settings + - name: DB_VENDOR + value: postgres + - name: DB_ADDR + value: postgres + - name: DB_PORT + value: "5432" + - name: DB_DATABASE + value: keycloak + - name: DB_USER + value: admin + - name: DB_PASSWORD + value: eorbahrhkswp +# - name: KEYCLOAK_USER +# value: keycloak +# - name: KEYCLOAK_PASSWORD +# value: keycloak +#extraEnv: "" + # - name: KEYCLOAK_LOGLEVEL + # value: DEBUG + # - name: WILDFLY_LOGLEVEL + # value: DEBUG + # - name: CACHE_OWNERS_COUNT + # value: "2" + # - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT + # value: "2" +#extraEnv: | +# - name: JGROUPS_DISCOVERY_PROTOCOL +# value: dns.DNS_PING +# - name: JGROUPS_DISCOVERY_PROPERTIES +# value: 'dns_query={{ include 
"keycloak.serviceDnsName" . }}' +# - name: CACHE_OWNERS_COUNT +# value: "2" +# - name: CACHE_OWNERS_AUTH_SESSIONS_COUNT +# value: "2" +# Additional environment variables for Keycloak mapped from Secret or ConfigMap +extraEnvFrom: "" + +# Pod priority class name +#priorityClassName: "manual" + +# Pod affinity +affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 10 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + {{- include "keycloak.selectorLabels" . | nindent 12 }} + matchExpressions: + - key: app.kubernetes.io/component + operator: NotIn + values: + - test + topologyKey: failure-domain.beta.kubernetes.io/zone + +#affinity: {} + +# Node labels for Pod assignment +nodeSelector: {} + +# Node taints to tolerate +tolerations: [] + +# Additional Pod labels +podLabels: {} + +# Additional Pod annotations +podAnnotations: {} + +# Liveness probe configuration +livenessProbe: | + httpGet: + path: /auth/ + port: http + initialDelaySeconds: 300 + timeoutSeconds: 5 + +# Readiness probe configuration +readinessProbe: | + httpGet: + path: /auth/realms/master + port: http + initialDelaySeconds: 30 + timeoutSeconds: 1 + +# Pod resource requests and limits +#resources: {} + # requests: + # cpu: "500m" + # memory: "1024Mi" + # limits: + # cpu: "500m" + # memory: "1024Mi" +resources: + requests: + memory: "200Mi" + cpu: "10m" + +# Startup scripts to run before Keycloak starts up +startupScripts: + # WildFly CLI script for configuring the node-identifier + keycloak.cli: | + {{- .Files.Get "scripts/keycloak.cli" }} + # mystartup.sh: | + # #!/bin/sh + # + # echo 'Hello from my custom startup script!' + +# Add additional volumes, e. g. 
for custom themes +extraVolumes: | + - name: theme + emptyDir: {} +#extraVolumes: "" + +# Add additional volumes mounts, e. g. for custom themes +extraVolumeMounts: | + - name: theme + mountPath: /opt/jboss/keycloak/themes +#extraVolumeMounts: "" + +# Add additional ports, e. g. for admin console or exposing JGroups ports +extraPorts: [] + +# Pod disruption budget +podDisruptionBudget: {} +# maxUnavailable: 1 +# minAvailable: 1 + +# Annotations for the StatefulSet +statefulsetAnnotations: {} + +# Additional labels for the StatefulSet +statefulsetLabels: {} + +# Configuration for secrets that should be created +secrets: {} + # mysecret: + # type: {} + # annotations: {} + # labels: {} + # stringData: {} + # data: {} + +service: + # Annotations for headless and HTTP Services + annotations: {} + # Additional labels for headless and HTTP Services + labels: {} + # key: value + # The Service type + type: NodePort + # Optional IP for the load balancer. Used for services of type LoadBalancer only + loadBalancerIP: "" + # The http Service port + httpPort: 80 + # The HTTP Service node port if type is NodePort + httpNodePort: 31082 + # The HTTPS Service port + httpsPort: 8443 + # The HTTPS Service node port if type is NodePort + httpsNodePort: null + # The WildFly management Service port + httpManagementPort: 9990 + # The WildFly management Service node port if type is NodePort + httpManagementNodePort: 31990 + # Additional Service ports, e. g. for custom admin console + extraPorts: [] + # When using Service type LoadBalancer, you can restrict source ranges allowed + # to connect to the LoadBalancer, e. g. 
will result in Security Groups + # (or equivalent) with inbound source ranges allowed to connect + loadBalancerSourceRanges: [] + # Session affinity + # See https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-userspace + sessionAffinity: "" + # Session affinity config + sessionAffinityConfig: {} + +ingress: + # If `true`, an Ingress is created + enabled: false + # The Service port targeted by the Ingress + servicePort: http + # Ingress annotations + annotations: {} + ## Resolve HTTP 502 error using ingress-nginx: + ## See https://www.ibm.com/support/pages/502-error-ingress-keycloak-response + # nginx.ingress.kubernetes.io/proxy-buffer-size: 128k + + # Additional Ingress labels + labels: {} + # List of rules for the Ingress + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - / + # TLS configuration + tls: + - hosts: + - keycloak.example.com + secretName: "" + + # ingress for console only (/auth/admin) + console: + # If `true`, an Ingress is created for console path only + enabled: false + # Ingress annotations for console ingress only + # Useful to set nginx.ingress.kubernetes.io/whitelist-source-range particularly + annotations: {} + rules: + - + # Ingress host + host: '{{ .Release.Name }}.keycloak.example.com' + # Paths for the host + paths: + - /auth/admin/ + +## Network policy configuration +networkPolicy: + # If true, the Network policies are deployed + enabled: false + + # Additional Network policy labels + labels: {} + + # Define all other external allowed source + # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#networkpolicypeer-v1-networking-k8s-io + extraFrom: [] + +route: + # If `true`, an OpenShift Route is created + enabled: false + # Path for the Route + path: / + # Route annotations + annotations: {} + # Additional Route labels + labels: {} + # Host name for the Route + host: "" + # TLS configuration + tls: + # If `true`, TLS is enabled 
for the Route + enabled: false + # Insecure edge termination policy of the Route. Can be `None`, `Redirect`, or `Allow` + insecureEdgeTerminationPolicy: Redirect + # TLS termination of the route. Can be `edge`, `passthrough`, or `reencrypt` + termination: edge + +pgchecker: + image: + # Docker image used to check Postgresql readiness at startup + #repository: cdm-dev.exem-oss.org/keycloak/busybox + #repository: {{ .Values.global.IMXC_REGISTRY }}/keycloak/busybox + repository: 10.10.31.243:5000/cmoa3/busybox + # Image tag for the pgchecker image + tag: 1.32 + # Image pull policy for the pgchecker image + pullPolicy: Always + # SecurityContext for the pgchecker contai/docker.ner + securityContext: + allowPrivilegeEscalation: false + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + # Resource requests and limits for the pgchecker container + resources: + requests: + cpu: "10m" + memory: "16Mi" + limits: + cpu: "10m" + memory: "16Mi" + +postgresql: + # If `true`, the Postgresql dependency is enabled + enabled: false + # PostgreSQL User to create + postgresqlUsername: keycloak + # PostgreSQL Password for the new user + postgresqlPassword: keycloak + # PostgreSQL Database to create + postgresqlDatabase: keycloak + # PostgreSQL network policy configuration + networkPolicy: + enabled: false + +serviceMonitor: + # If `true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /metrics + # The Service port at which metrics are served + port: http-management + +extraServiceMonitor: + # If 
`true`, a ServiceMonitor resource for the prometheus-operator is created + enabled: false + # Optionally sets a target namespace in which to deploy the ServiceMonitor resource + namespace: "" + # Optionally sets a namespace for the ServiceMonitor + namespaceSelector: {} + # Annotations for the ServiceMonitor + annotations: {} + # Additional labels for the ServiceMonitor + labels: {} + # Interval at which Prometheus scrapes metrics + interval: 10s + # Timeout for scraping + scrapeTimeout: 10s + # The path at which metrics are served + path: /auth/realms/master/metrics + # The Service port at which metrics are served + port: http + +prometheusRule: + # If `true`, a PrometheusRule resource for the prometheus-operator is created + enabled: false + # Annotations for the PrometheusRule + annotations: {} + # Additional labels for the PrometheusRule + labels: {} + # List of rules for Prometheus + rules: [] + # - alert: keycloak-IngressHigh5xxRate + # annotations: + # message: The percentage of 5xx errors for keycloak over the last 5 minutes is over 1%. 
+ # expr: | + # ( + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak",status=~"5[0-9]{2}"}[1m] + # ) + # ) + # / + # sum( + # rate( + # nginx_ingress_controller_response_duration_seconds_count{exported_namespace="mynamespace",ingress="mynamespace-keycloak"}[1m] + # ) + # ) + # ) * 100 > 1 + # for: 5m + # labels: + # severity: warning + +autoscaling: + # If `true`, a autoscaling/v2beta2 HorizontalPodAutoscaler resource is created (requires Kubernetes 1.18 or above) + # Autoscaling seems to be most reliable when using KUBE_PING service discovery (see README for details) + # This disables the `replicas` field in the StatefulSet + enabled: false + # Additional HorizontalPodAutoscaler labels + labels: {} + # The minimum and maximum number of replicas for the Keycloak StatefulSet + minReplicas: 3 + maxReplicas: 10 + # The metrics to use for scaling + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + # The scaling policy to use. This will scale up quickly but only scale down a single Pod per 5 minutes. + # This is important because caches are usually only replicated to 2 Pods and if one of those Pods is terminated this will give the cluster time to recover. 
+ behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 300 + +test: + # If `true`, test resources are created + enabled: false + image: + # The image for the test Pod + #repository: docker.io/unguiculus/docker-python3-phantomjs-selenium + repository: 10.10.31.243:5000/docker-python3-phantomjs-selenium + # The tag for the test Pod image + tag: v1 + # The image pull policy for the test Pod image + pullPolicy: IfNotPresent + # SecurityContext for the entire test Pod + podSecurityContext: + fsGroup: 1000 + # SecurityContext for the test container + securityContext: + runAsUser: 1000 + runAsNonRoot: true + diff --git a/roles/cmoa_install/files/05-imxc/Chart.yaml b/roles/cmoa_install/files/05-imxc/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/roles/cmoa_install/files/05-imxc/cmoa-manual.yaml b/roles/cmoa_install/files/05-imxc/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/roles/cmoa_install/files/05-imxc/scripts/init-api-server.sh b/roles/cmoa_install/files/05-imxc/scripts/init-api-server.sh new file mode 
100644 index 0000000..78a9962 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/scripts/init-api-server.sh @@ -0,0 +1,17 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + /sbin/tini -- java -Djava.security.egd=file:/dev/./urandom -jar /app.jar + #java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/roles/cmoa_install/files/05-imxc/scripts/init-auth-server.sh b/roles/cmoa_install/files/05-imxc/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! 
/bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. 
check to install keycloak" +fi diff --git a/roles/cmoa_install/files/05-imxc/scripts/init-noti-server.sh b/roles/cmoa_install/files/05-imxc/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_install/files/05-imxc/scripts/init-resource.sh b/roles/cmoa_install/files/05-imxc/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/roles/cmoa_install/files/05-imxc/scripts/init.json b/roles/cmoa_install/files/05-imxc/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 
0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + 
"composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": 
"73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, 
+ { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + 
"webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + 
"nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": 
false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": 
"fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": 
"${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": 
"true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + 
"access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web 
origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + 
"consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", 
+ "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + 
"consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + 
"id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": 
"true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + 
"eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope 
Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + 
"id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + 
"description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", 
+ "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + 
"priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication 
Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + 
"config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/roles/cmoa_install/files/05-imxc/templates/auth-server.yaml b/roles/cmoa_install/files/05-imxc/templates/auth-server.yaml new file mode 100644 index 0000000..fb8fe7b --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/auth-server.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: auth-server + namespace: imxc +spec: + selector: + matchLabels: + app: auth + replicas: 1 + template: + metadata: + labels: + app: auth + spec: + initContainers: + - name: init-resource + image: {{ .Values.global.IMXC_IN_REGISTRY }}/init-resource:latest + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: ['chmod -R 777 /scripts; cp /scripts/init.json /tmp/init.json'] + volumeMounts: + - name: init + mountPath: /tmp + containers: + - name: auth-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/auth-server:{{ .Values.global.AUTH_SERVER_VERSION }} + imagePullPolicy: 
IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-auth-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # imxc-api-server configuration + - name: IMXC_API-SERVER-URL + value: http://imxc-api-service:8080 + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_REPO + value: debug + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_AUTH_AUTHENTICATION_USER_SERVICE + value: debug + # 현대카드는 커스텀으로 해당 값 추가. keycloak만 사용(true), keycloak+내부db 사용(false) + - name: IMXC_KEYCLOAK_ENABLED + value: "true" + + volumeMounts: + - name: init + mountPath: /tmp + resources: + requests: + memory: "200Mi" + cpu: "10m" + + volumes: + - name: init + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: auth-server-service + namespace: imxc +spec: + type: ClusterIP + selector: + app: auth + ports: + - protocol: TCP + port: 8480 + # nodePort: 15016 diff --git a/roles/cmoa_install/files/05-imxc/templates/cloudmoa-datagate.yaml b/roles/cmoa_install/files/05-imxc/templates/cloudmoa-datagate.yaml new file mode 100644 index 0000000..cbbee9a --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/cloudmoa-datagate.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + selector: + matchLabels: + app: datagate + replicas: 2 + template: + metadata: + labels: + app: datagate + spec: + containers: + - image: {{ .Values.global.IMXC_IN_REGISTRY }}/datagate:{{ .Values.global.DATAGATE_VERSION }} + imagePullPolicy: IfNotPresent + name: datagate + ports: + - containerPort: 50051 + protocol: TCP + - containerPort: 14268 + protocol: TCP + - 
containerPort: 14269 + protocol: TCP + readinessProbe: + httpGet: + path: "/" + port: 14269 + env: + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! + - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: STORAGE_TYPE + value: kafka + - name: KAFKA_PRODUCER_BROKERS + value: kafka-broker:9094 + - name: LOG_LEVEL + value: "INFO" + resources: + requests: + cpu: "100m" + memory: "100Mi" + limits: + cpu: "2000m" + memory: "1Gi" +--- +apiVersion: v1 +kind: Service +metadata: + name: datagate + namespace: imxc + labels: + app: datagate +spec: + ports: + - name: datagate-grpc + port: 50051 + protocol: TCP + targetPort: 50051 + nodePort: 30051 + - name: datagate-http + port: 14268 + targetPort: 14268 +# nodePort: 31268 + - name: datagate-readiness + port: 14269 + targetPort: 14269 + selector: + app: datagate + type: NodePort diff --git a/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml b/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml new file mode 100644 index 0000000..45c3d41 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-agent.yaml @@ -0,0 +1,331 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + selector: + matchLabels: + app: metric-agent + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: metric-agent + spec: + containers: + - name: metric-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-agent:{{ .Values.global.METRIC_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14271 + - containerPort: 14272 + args: + - --config.file=/etc/metric-agent/metric-agent.yml + env: + - name: STORAGE_TYPE + value: datagate + - name: DATAGATE + value: datagate:50051 + - name: CLUSTER_ID + value: cloudmoa +# - name: USER_ID +# value: mskim@ex-em.com + volumeMounts: + - mountPath: /etc/metric-agent/ + name: 
config-volume + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "1000Mi" + cpu: "300m" + volumes: + - name: config-volume + configMap: + name: metric-agent-config + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-agent + namespace: imxc + labels: + app: metric-agent +spec: + ports: + - name: metric + port: 14271 + targetPort: 14271 + selector: + app: metric-agent + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: metric-agent-config + namespace: imxc +data: + metric-agent.yml: | + global: + scrape_interval: 10s + evaluation_interval: 5s # Evaluate rules every 15 seconds. The default is every 1 minute. + + scrape_configs: + - job_name: 'kubernetes-kubelet' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_clst_id + replacement: 'cloudmoa' + - target_label: xm_entity_type + replacement: 'Node' + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (kubelet_running_pod_count) + action: keep + + + - job_name: 'kubernetes-node-exporter' + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - source_labels: [__meta_kubernetes_role] + action: replace + target_label: 
kubernetes_role + - source_labels: [__address__] + regex: '(.*):10250' + replacement: '${1}:9100' + target_label: __address__ + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ + # set "name" value to "job" + - source_labels: [job] + regex: 'kubernetes-(.*)' + replacement: '${1}' + target_label: name + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Node' + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: xm_namespace + replacement: $1 + action: replace + + # added by mskim 8/19 + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (node_boot_time_seconds|node_context_switches_total|node_cpu_frequency_max_hertz|node_cpu_package_throttles_total|node_cpu_seconds_total|node_disk_io_time_seconds_total|node_disk_read_bytes_total|node_disk_read_time_seconds_total|node_disk_reads_completed_total|node_disk_write_time_seconds_total|node_disk_writes_completed_total|node_disk_written_bytes_total|node_filefd_allocated|node_filesystem_avail_bytes|node_filesystem_free_bytes|node_filesystem_size_bytes|node_load1|node_load15|node_load5|node_memory_Active_bytes|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_memory_SwapCached_bytes|node_memory_SwapFree_bytes|node_memory_SwapTotal_bytes|node_network_receive_bytes_total|node_network_receive_bytes_total|node_network_transmit_bytes_total|node_network_transmit_bytes_total) + action: keep + + - job_name: 'kubernetes-cadvisor' + scheme: https + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + metrics_path: 
/metrics/cadvisor + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: xm_node_id + - target_label: xm_entity_type + replacement: 'Container' + +{{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod] + target_label: xm_pod_id + - source_labels: [container] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: (container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep + + {{- else }} + metric_relabel_configs: + - source_labels: [namespace] + target_label: xm_namespace + - source_labels: [pod_name] + target_label: xm_pod_id + - source_labels: [container_name] + target_label: xm_cont_name + - source_labels: [id] + target_label: xm_cont_id + # added by mskim 8/19 + - source_labels: [ __name__ ] + regex: 
(container_cpu_cfs_throttled_seconds_total|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_user_seconds_total|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_usage_bytes|container_fs_writes_bytes_total|container_last_seen|container_memory_cache|container_memory_max_usage_bytes|container_memory_swap|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_spec_memory_limit_bytes) + action: keep +{{- end }} + # CLOUD-8671 | 데이터 필터링 설정 추가 + - source_labels: [ __name__, image ] + separator: "@" + regex: "container_cpu.*@" + action: drop + - source_labels: [ __name__, name ] + separator: "@" + regex: "container_memory.*@" + action: drop + + - job_name: 'kafka-consumer' + metrics_path: /remote_prom + scrape_interval: 5s + scrape_timeout: 5s + scheme: kafka + static_configs: + - targets: ['kafka-broker:9094'] + params: + #server_addrs: ['broker.default.svc.k8s:9094'] + server_addrs: ['kafka-broker:9094'] + encoding: [proto3] + contents: [remote_write] + compression: [snappy] + group: [remote-write-consumer] + workers: [50] + + # job for API server (SpringBoot) commented by ersione 2019-09-19 + - job_name: 'imxc-api' + metrics_path: '/actuator/prometheus' + scrape_interval: 5s + static_configs: + - targets: ['imxc-api-service:8080'] + - job_name: 'imxc-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] + #- job_name: 'imxc-auth' + # metrics_path: '/actuator/prometheus' + # scrape_interval: 15s + # static_configs: + # - targets: ['auth-server-service:8480'] + + + + - job_name: 'alertmanager-exporter' + metrics_path: '/metrics' + scrape_interval: 5s + static_configs: + - targets: ['alertmanager:9093'] + + + # modified by seungtak choi 2020-02-18 + - job_name: 'cmoa-collector' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + 
namespaces: + names: + - imxc + relabel_configs: + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: cmoa-collector + + # added by dwkim 2021-03-15 + - job_name: 'elasticsearch' + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + relabel_configs: + - target_label: xm_clst_id + replacement: 'cloudmoa' + - source_labels: [__meta_kubernetes_pod_node_name] + target_label: xm_node_id + - source_labels: [__meta_kubernetes_namespace] + target_label: xm_namespace + - source_labels: [__meta_kubernetes_service_name] + action: keep + regex: es-exporter-elasticsearch-exporter + + # kafka-exporter prometheus 수집 룰 추가 + - job_name: 'kafka-exporter' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9308' + + # kafka-jmx-exporter configuration yaml 수집룰 추가 + - job_name: 'kafka-jmx' + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - imxc + scheme: http + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: labelmap + regex: __meta_kubernetes_service_annotation_(.+) + - source_labels: [__meta_kubernetes_pod_container_port_number] + action: keep + regex: '(.*)9010' + + # job for API Server(Spring Cloud Notification Server) commented by hjyoon 2022-01-26 + - job_name: 'cmoa-noti' + metrics_path: '/actuator/prometheus' + scrape_interval: 15s + static_configs: + - targets: ['noti-server-service:8080'] diff --git a/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml b/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml new file mode 100644 index 0000000..3d7acc8 --- /dev/null +++ 
b/roles/cmoa_install/files/05-imxc/templates/cloudmoa-metric-collector.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + selector: + matchLabels: + app: metric-collector + replicas: 3 + template: + metadata: + labels: + app: metric-collector + spec: + containers: + - name: metric-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/metric-collector:{{ .Values.global.METRIC_COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 14270 + env: + - name: KAFKA_CONSUMER_BROKERS + value: kafka-broker:9094 + - name: HTTP_PUSH + value: http://base-cortex-nginx/api/v1/push + securityContext: + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + name: metric-collector + namespace: imxc + labels: + app: metric-collector +spec: + ports: + - name: metric + port: 14270 + targetPort: 14270 + selector: + app: metric-collector diff --git a/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml b/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml new file mode 100644 index 0000000..b20fed2 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-batch.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-batch + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-batch +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-batch + template: + metadata: + labels: + app: cmoa-kube-info-batch + spec: + containers: + - name: cmoa-kube-info-batch + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-batch:{{ .Values.global.KUBE_INFO_BATCH_VERSION }} + imagePullPolicy: Always + env: + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ .Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ 
.Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: DELETE_HOUR + value: '{{ .Values.global.DELETE_HOUR }}' diff --git a/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml b/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml new file mode 100644 index 0000000..cad91b9 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-connector.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-connector + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-connector +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-connector + template: + metadata: + labels: + app: cmoa-kube-info-connector + spec: + containers: + - name: cmoa-kube-info-connector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-connector:{{ .Values.global.KUBE_INFO_CONNECTOR_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_GROUP_ID + value: cmoa-kube-info-connector + - name: KAFKA_SERVER + value: kafka:9092 + - name: JDBC_KIND + value: {{ .Values.global.JDBC_KIND }} + - name: JDBC_SERVER + value: {{ .Values.global.JDBC_SERVER }} + - name: JDBC_DB + value: {{ .Values.global.JDBC_DB }} + - name: JDBC_USER + value: {{ .Values.global.JDBC_USER }} + - name: JDBC_PWD + value: {{ .Values.global.JDBC_PWD }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + - name: MAX_POLL_RECORDS_CONFIG + value: "300" + - name: MAX_POLL_INTERVAL_MS_CONFIG + value: "600000" + - name: SESSION_TIMEOUT_MS_CONFIG + value: "60000" + - name: MAX_PARTITION_FETCH_BYTES_CONFIG + value: "5242880" diff --git a/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml 
b/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml new file mode 100644 index 0000000..6f77ee5 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/cmoa-kube-info-flat.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmoa-kube-info-flat + namespace: {{ .Values.global.IMXC_NAMESPACE }} + labels: + app: cmoa-kube-info-flat +spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-kube-info-flat + template: + metadata: + labels: + app: cmoa-kube-info-flat + spec: + containers: + - name: cmoa-kube-info-flat + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kube-info-flat:{{ .Values.global.KUBE_INFO_FLAT_VERSION }} + imagePullPolicy: Always + env: + - name: KAFKA_SERVER + value: kafka:9092 + - name: KAFKA_INPUT_TOPIC + value: {{ .Values.global.KAFKA_INPUT_TOPIC }} + - name: TABLE_PREFIX + value: {{ .Values.global.TABLE_PREFIX }} + - name: BLACK_LIST + value: {{ .Values.global.BLACK_LIST }} + resources: + limits: + memory: 1Gi + requests: + memory: 200Mi diff --git a/roles/cmoa_install/files/05-imxc/templates/cmoa-manual.yaml b/roles/cmoa_install/files/05-imxc/templates/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/roles/cmoa_install/files/05-imxc/templates/eureka-server.yaml 
b/roles/cmoa_install/files/05-imxc/templates/eureka-server.yaml new file mode 100644 index 0000000..5ffd9c2 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/eureka-server.yaml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Service +metadata: + name: eureka + namespace: imxc + labels: + app: eureka +spec: + type: NodePort + ports: + - port: 8761 + targetPort: 8761 + nodePort: 30030 + name: eureka + selector: + app: eureka +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: eureka + namespace: imxc +spec: + serviceName: 'eureka' + replicas: 3 + selector: + matchLabels: + app: eureka + template: + metadata: + labels: + app: eureka + spec: + containers: + - name: eureka + image: {{ .Values.global.IMXC_IN_REGISTRY }}/eureka-server:{{ .Values.global.EUREKA_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8761 + #resources: + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "1200Mi" + # cpu: "500m" + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka-0.eureka:8761/eureka/,http://eureka-1.eureka:8761/eureka/,http://eureka-2.eureka:8761/eureka/ + - name: JVM_OPTS + value: "-Xms1g -Xmx1g" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "20m" diff --git a/roles/cmoa_install/files/05-imxc/templates/imxc-api-server.yaml b/roles/cmoa_install/files/05-imxc/templates/imxc-api-server.yaml new file mode 100644 index 0000000..de967a6 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/imxc-api-server.yaml @@ -0,0 +1,245 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-api-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-api + ports: + - protocol: TCP + name: api + port: 8080 + targetPort: 8080 + nodePort: 32080 + - protocol: TCP + name: netty + port: 10100 + targetPort: 10100 + nodePort: 31100 +--- +apiVersion: apps/v1 +kind: 
Deployment +metadata: + name: imxc-api + namespace: imxc + labels: + app: imxc-api +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-api + template: + metadata: + labels: + app: imxc-api + build: develop + spec: + securityContext: + #runAsNonRoot: true + runAsUser: 1577 + initContainers: + - name: cloudmoa-api-permission-fix + image: {{ .Values.global.IMXC_IN_REGISTRY }}/busybox:latest + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 0 +# - sh +# - -c +# - "chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log" + volumeMounts: + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + containers: + - name: imxc-api + image: {{ .Values.global.IMXC_IN_REGISTRY }}/api-server:{{ .Values.global.API_SERVER_VERSION }} + resources: + requests: + cpu: 200m + memory: 500Mi + limits: + cpu: 2000m + memory: 5000Mi + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-api-server.sh" | quote }}] + env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + - name: SPRING_DATAGATE_URLS + value: "{{ .Values.global.DATAGATE_INSIDE_IP }}" + - name: SPRING_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_INSIDE_PORT }}" + - name: SPRING_REDIS_URLS + value: {{ .Values.global.REDIS_URLS }} + - name: SPRING_REDIS_PORT + value: "{{ .Values.global.REDIS_PORT }}" + - name: SPRING_REDIS_PASSWORD + value: {{ .Values.global.REDIS_PASSWORD }} + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + - name: SPRING_BOOT_ADMIN_CLIENT_URL + value: http://{{ .Values.global.IMXC_ADMIN_SERVER_DNS }}:8888 + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_NAME + value: Intermax Cloud API Server + - name: SPRING_BOOT_ADMIN_CLIENT_ENABLED + value: "false" + - name: 
OPENTRACING_JAEGER_ENABLED + value: "false" + - name: SPRING_JPA_PROPERTIES_HIBERNATE_GENERATE_STATISTICS + value: "false" + - name: IMXC_REPORT_ENABLED + value: "true" + - name: IMXC_ALERT_PERSIST + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_ENVIRONMENT + value: Demo + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_PREFERIP + value: "true" + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_NODENAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: SPRING_BOOT_ADMIN_CLIENT_INSTANCE_METADATA_TAGS_PODNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: SPRING_BOOT_ADMIN_CLIENT_AUTODEREGISTRATION + value: "true" + - name: SPRING_JPA_HIBERNATE_DDL-AUTO + value: validate + - name: KEYCLOAK_AUTH-SERVER-URL + value: "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}" + - name: KEYCLOAK_REALM + value: exem + - name: KEYCLOAK_RESOURCE + value: "{{ .Values.global.KEYCLOAK_RESOURCE }}" + - name: SPRING_KEYCLOAK_MASTER_USERNAME + value: "{{ .Values.global.KEYCLOAK_MASTER_USERNAME }}" + - name: SPRING_KEYCLOAK_MASTER_PASSWORD + value: "{{ .Values.global.KEYCLOAK_MASTER_PASSWORD }}" + - name: SPRING_LDAP_USE + value: "{{ .Values.global.IMXC_LDAP_USE }}" + - name: TIMEZONE + value: Asia/Seoul + - name: IMXC_PROMETHEUS_URL + value: http://base-cortex-nginx/prometheus + - name: IMXC_PROMETHEUS_NAMESPACE + value: "imxc" + - name: LOGGING_LEVEL_ROOT + value: info + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + #R30020210730 추가 :: 현대카드는 true로 설정 + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-HOST + value: 
"exemmail1.ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PORT + value: "587" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-USERNAME + value: "imxc@ex-em.com" + - name: IMXC_ALERT_NOTIFICATION_MAIL_MAIL-PASSWORD + value: "1234" + - name: IMXC_ALERT_NOTIFICATION_MAIL_PROTOCOL + value: "smtp" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-REQ + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_STARTTLS-ENB + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_SMTP-AUTH + value: "true" + - name: IMXC_ALERT_NOTIFICATION_MAIL_DEBUG + value: "true" + - name: IMXC_ANOMALY_BLACK-LIST + value: "false" + - name: IMXC_VERSION_SAAS + value: "false" + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_API_SERVER_KUBERNETES_SERVICE + value: info + - name: IMXC_WEBSOCKET_SCHEDULE_PERIOD_5SECOND + value: "30000" + - name: IMXC_CACHE_INFO_1MCACHE + value: "0 0/1 * * * ?" + - name: IMXC_EXECUTION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_PERMISSION_LOG_USE + value: "false" + - name: IMXC_EXECUTION_CODE-LOG_USE + value: "false" + - name: IMXC_PORTAL_INFO_URL + value: "{{ .Values.global.IMXC_PORTAL_INFO_URL }}" + # Do not remove below rows related to AGENT-INSTALL. Added by youngmin 2021-03-29. 
+ - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_IP + value: {{ .Values.global.KAFKA_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_KAFKA_INTERFACE-PORT + value: "{{ .Values.global.KAFKA_INTERFACE_PORT }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_IP + value: {{ .Values.global.IMXC_API_SERVER_IP }} + - name: AGENT-INSTALL_COLLECTION-SERVER_APISERVER_NETTY-PORT + value: "{{ .Values.global.APISERVER_NETTY_PORT }}" + - name: AGENT-INSTALL_REGISTRY_URL + value: {{ .Values.global.IMXC_IN_REGISTRY }} + - name: AGENT-INSTALL_IMAGE_TAG + value: {{ .Values.global.AGENT_IMAGE_TAG }} + - name: AGENT-INSTALL_JAEGER_AGENT_CLUSTERIP + value: {{ .Values.global.JAEGER_AGENT_CLUSTERIP }} + - name: AGENT-INSTALL_JAEGER_JAVA-SPECIALAGENT-CLASSPATH + value: {{ .Values.global.JAEGER_JAVA_SPECIALAGENT_CLASSPATH }} + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_IP + value: "{{ .Values.global.DATAGATE_OUTSIDE_IP }}" + - name: AGENT-INSTALL_COLLECTION-SERVER_DATAGATE_PORT + value: "{{ .Values.global.DATAGATE_OUTSIDE_PORT }}" + - name: IMXC_REST-CONFIG_MAX-CON + value: "200" + - name: IMXC_REST-CONFIG_MAX-CON-ROUTE + value: "65" + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + # Elasticsearch for Security + - name: SPRING_ELASTIC_SSL_USERNAME + value: "{{ .Values.global.CMOA_ES_ID }}" + - name: SPRING_ELASTIC_SSL_PASSWORD + value: "{{ .Values.global.CMOA_ES_PW }}" + - name: IMXC_BACK-LOGIN_ENABLED + value: "{{ .Values.global.BACKLOGIN }}" + volumeMounts: + - mountPath: /var/log/imxc-audit.log + name: auditlog + - mountPath: /home/cloudmoa/notification/cloudmoa_alert.log + name: notification-directory + - mountPath: /home/cloudmoa/notification/ + name: notification-upper-directory + volumes: + - name: auditlog + hostPath: + path: {{ .Values.global.AUDITLOG_PATH }}/imxc-audit.log + type: FileOrCreate + - name: notification-upper-directory + hostPath: + path: /home/ + type: DirectoryOrCreate + - name: notification-directory + hostPath: + path: 
/home/cloudmoa_event.log + type: FileOrCreate diff --git a/roles/cmoa_install/files/05-imxc/templates/imxc-collector.yaml b/roles/cmoa_install/files/05-imxc/templates/imxc-collector.yaml new file mode 100644 index 0000000..e125243 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/imxc-collector.yaml @@ -0,0 +1,79 @@ +apiVersion: v1 +kind: List +items: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + replicas: 1 + selector: + matchLabels: + app: cmoa-collector + template: + metadata: + labels: + app: cmoa-collector + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: cmoa-collector + image: {{ .Values.global.IMXC_IN_REGISTRY }}/cmoa-collector:{{ .Values.global.COLLECTOR_VERSION }} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + memory: 500Mi + limits: + cpu: 500m + memory: 2500Mi + ports: + - containerPort: 12010 + env: + - name: LOCATION + value: Asia/Seoul + - name: KAFKA_SERVER + value: kafka:9092 + - name: ELASTICSEARCH + value: elasticsearch:9200 +# - name: PROMETHEUS +# value: nginx-cortex/prometheus + - name: REDIS_ADDR + value: redis-master:6379 + - name: REDIS_PW + value: dkagh1234! 
+ - name: REDIS_DB + value: "0" + - name: REDIS_TYPE + value: normal + - name: CMOA_ES_ID + value: {{ .Values.global.CMOA_ES_ID }} + - name: CMOA_ES_PW + value: {{ .Values.global.CMOA_ES_PW }} + resources: + requests: + cpu: "300m" + memory: "1500Mi" + limits: + cpu: "500m" + memory: "2500Mi" +- apiVersion: v1 + kind: Service + metadata: + name: cmoa-collector + namespace: imxc + labels: + app: cmoa-collector + spec: + ports: + - name: cmoa-collector-exporter + port: 12010 + targetPort: 12010 + selector: + app: cmoa-collector + diff --git a/roles/cmoa_install/files/05-imxc/templates/noti-server.yaml b/roles/cmoa_install/files/05-imxc/templates/noti-server.yaml new file mode 100644 index 0000000..99c7a5b --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/noti-server.yaml @@ -0,0 +1,121 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: noti-server + namespace: imxc +spec: + selector: + matchLabels: + app: noti + replicas: 1 + template: + metadata: + labels: + app: noti + spec: + containers: + - name: noti-server + image: {{ .Values.global.IMXC_IN_REGISTRY }}/notification-server:{{ .Values.global.NOTI_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + command: ["sh", "-c", {{ .Files.Get "scripts/init-noti-server.sh" | quote }}] + env: + # spring profile + - name: SPRING_PROFILES_ACTIVE + value: prd + + # keycloak configuration + - name: KEYCLOAK_AUTH-SERVER-URL + value: {{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }} + - name: KEYCLOAK_REALM + value: exem + + # eureka configuration + - name: EUREKA_CLIENT_SERVICE-URL_DEFAULTZONE + value: http://eureka:8761/eureka + + # postgres configuration + - name: SPRING_DATASOURCE_URL + value: jdbc:log4jdbc:postgresql://postgres:5432/postgresdb + + # redis configuration + - name: SPRING_REDIS_HOST + value: redis-master + - name: SPRING_REDIS_PORT + value: "6379" + - name: SPRING_REDIS_PASSWORD + value: dkagh1234! 
+ + # elasticsearch configuration + - name: SPRING_ELASTIC_URLS + value: elasticsearch + - name: SPRING_ELASTIC_PORT + value: "9200" + + # file I/O configuration + - name: IMXC_ALERT_NOTIFICATION_FILE_USE + value: "true" + - name: IMXC_ALERT_NOTIFICATION_FILE_FILE-LIMIT-SIZE-MB + value: "10" + - name: IMXC_ALERT_NOTIFICATION_FILE_PATH + value: /cloudmoa_noti + - name: IMXC_ALERT_NOTIFICATION_FILE_NAME + value: cloudmoa_alert.log + - name: IMXC_ALERT_NOTIFICATION_FILE_FORMAT + value: $[name]/($[level])/$[data]/$[message] + - name: IMXC_ALERT_NOTIFICATION_FILE_LEVELCONTRACT + value: "true" + + # rabbitmq configuration + - name: IMXC_RABBITMQ_HOST + value: base-rabbitmq + - name: IMXC_RABBITMQ_PORT + value: "61613" + - name: IMXC_RABBITMQ_CLIENT_ID + value: "user" + - name: IMXC_RABBITMQ_CLIENT_PASSWORD + value: "eorbahrhkswp" + - name: IMXC_RABBITMQ_SYSTEM_ID + value: "user" + - name: IMXC_RABBITMQ_SYSTEM_PASSWORD + value: "eorbahrhkswp" + + # api-server configuration + - name: IMXC_API-SERVER-URL + value: "http://imxc-api-service:8080" + + # cortex integration + - name: SPRING_CORTEX_URLS + value: base-cortex-configs + - name: SPRING_CORTEX_PORT + value: "8080" + + # alert webhook + - name: IMXC_ALERT_WEBHOOK_URLS + value: http://noti-server-service:8080/alert + + # etc configuration + - name: IMXC_PROMETHEUS_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + - name: IMXC_ALERT_KUBERNETES_NAMESPACE + value: {{ .Values.global.IMXC_NAMESPACE }} + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + resources: + requests: + memory: "100Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: noti-server-service + namespace: imxc +spec: + type: NodePort + selector: + app: noti + ports: + - protocol: TCP + port: 8080 + nodePort: 31083 diff --git a/roles/cmoa_install/files/05-imxc/templates/streams-depl.yaml b/roles/cmoa_install/files/05-imxc/templates/streams-depl.yaml new file mode 100644 index 0000000..b3223e5 --- /dev/null +++ 
b/roles/cmoa_install/files/05-imxc/templates/streams-depl.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-stream-txntrend-deployment + namespace: imxc + labels: + app: kafka-stream-txntrend +spec: + replicas: 1 + selector: + matchLabels: + app: kafka-stream-txntrend + template: + metadata: + labels: + app: kafka-stream-txntrend + spec: + containers: + - name: kafka-stream-txntrend + image: {{ .Values.global.IMXC_IN_REGISTRY }}/kafka-stream-txntrend:{{ .Values.global.KAFKA_STREAM_VERSION }} + imagePullPolicy: IfNotPresent + env: + - name: SERVICE_KAFKA_HOST + value: kafka-broker:9094 + - name: SERVICE_STREAM_OUTPUT + value: jspd_txntrend diff --git a/roles/cmoa_install/files/05-imxc/templates/topology-agent.yaml b/roles/cmoa_install/files/05-imxc/templates/topology-agent.yaml new file mode 100644 index 0000000..80476a3 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/topology-agent.yaml @@ -0,0 +1,107 @@ +{{ if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: rbac.authorization.k8s.io/v1 +{{ else }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{ end }} +kind: ClusterRoleBinding +metadata: + name: topology-agent + namespace: imxc + labels: + k8s-app: topology-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: topology-agent + namespace: imxc +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: topology-agent + namespace: imxc + labels: + app: topology-agent +spec: + selector: + matchLabels: + app: topology-agent + template: + metadata: + labels: + app: topology-agent + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + # below appended + hostPID: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: topology-agent + image: {{ .Values.global.IMXC_IN_REGISTRY }}/topology-agent:{{ 
.Values.global.TOPOLOGY_AGENT_VERSION }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/usr/bin + name: bin-volume + - mountPath: /var/run/docker.sock + name: docker-volume + - mountPath: /host/proc + name: proc-volume + - mountPath: /root + name: root-volume + env: + - name: CLUSTER_ID + value: cloudmoa + - name: ROOT_DIRECTORY + value: /root + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DATAGATE + value: datagate:50051 + - name: LOG_RNAME_USE + value: "false" + - name: LOG_LEVEL + value: "DEBUG" + - name: CLOUDMOA_SETTING_PATH + value: /home/cloudmoa/setting/ + resources: + requests: + memory: "125Mi" + cpu: "100m" + limits: + memory: "600Mi" + cpu: "500m" + volumes: + - name: bin-volume + hostPath: + path: /usr/bin + type: Directory + - name: docker-volume + hostPath: + path: /var/run/docker.sock + - name: proc-volume + hostPath: + path: /proc + - name: root-volume + hostPath: + path: / diff --git a/roles/cmoa_install/files/05-imxc/templates/zuul-server.yaml b/roles/cmoa_install/files/05-imxc/templates/zuul-server.yaml new file mode 100644 index 0000000..79969d7 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/templates/zuul-server.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zuul-deployment + namespace: imxc + labels: + app: cloud +spec: + selector: + matchLabels: + app: cloud + replicas: 1 + template: + metadata: + labels: + app: cloud + spec: + containers: + - env: + - name: SPRING_PROFILES_ACTIVE + value: prd + - name: SPRING_ZIPKIN_BASE-URL + value: http://zipkin-service:9411 + - name: LOGGING_LEVEL_COM_EXEM_CLOUD_ZUULSERVER_FILTERS_AUTHFILTER + value: info + # log4j + - name: LOG4J_FORMAT_MSG_NO_LOOKUPS + value: "true" + name: zuul + image: {{ 
.Values.global.IMXC_IN_REGISTRY }}/zuul-server:{{ .Values.global.ZUUL_SERVER_VERSION }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 + #- containerPort: 6831 + #protocol: UDP + #resources: + # requests: + # memory: "256Mi" + # cpu: "344m" + # limits: + # memory: "1Gi" + # cpu: "700m" + resources: + requests: + memory: "200Mi" + cpu: "50m" +--- +apiVersion: v1 +kind: Service +metadata: + name: zuul + namespace: imxc + labels: + app: cloud +spec: + type: NodePort + selector: + app: cloud + ports: + - port: 8080 + targetPort: 8080 + nodePort: 31081 diff --git a/roles/cmoa_install/files/05-imxc/values.yaml b/roles/cmoa_install/files/05-imxc/values.yaml new file mode 100644 index 0000000..07c9a47 --- /dev/null +++ b/roles/cmoa_install/files/05-imxc/values.yaml @@ -0,0 +1,157 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + IMXC_LDAP_USE: false + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AUDITLOG_PATH: /var/log + KAFKA_IP: kafka-broker + # 로드밸런서 안 쓴다고 가정했을때 입니다.. + KAFKA_INTERFACE_PORT: 9094 + APISERVER_NETTY_PORT: 10100 + #REGISTRY_URL: cdm-dev.exem-oss.org:5050 + #REGISTRY_URL: 10.10.31.243:5000/cmoa + IMXC_ADMIN_SERVER_DNS: imxc-admin-service + AGENT_IMAGE_TAG: rel0.0.0 + # Jaeger 관련변수 + JAEGER_AGENT_CLUSTERIP: 10.98.94.198 + JAEGER_JAVA_SPECIALAGENT_CLASSPATH: classpath:/install/opentracing-specialagent-1.7.4.jar + # added by DongWoo Kim 2021-06-21 + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_MASTER_USERNAME: admin + KEYCLOAK_MASTER_PASSWORD: admin + IMXC_PORTAL_INFO_URL: + KEYCLOAK_REALM: exem + # added by EunHye Kim 2021-08-25 + #DATAGATE_URLS: datagate + #DATAGATE_IP: 111.111.111.111 + #DATAGATE_PORT: 14268 + DATAGATE_INSIDE_IP: datagate + DATAGATE_INSIDE_PORT: 14268 + DATAGATE_OUTSIDE_IP: 111.111.111.111 + DATAGATE_OUTSIDE_PORT: 30051 + REDIS_URLS: redis-master + REDIS_PORT: 6379 + REDIS_PASSWORD: dkagh1234! 
+ # added by DongWoo Kim 2021-08-31 (version of each module) + DATAGATE_VERSION: rel0.0.0 + #ADMIN_SERVER_VERSION: v1.0.0 + #API_SERVER_VERSION: CLOUD-172 + API_SERVER_VERSION: rel0.0.0 + COLLECTOR_VERSION: rel0.0.0 + #release-3.3.0 + TOPOLOGY_AGENT_VERSION: rel0.0.0 + METRIC_COLLECTOR_VERSION: rel0.0.0 + #v1.0.0 + METRIC_AGENT_VERSION: rel0.0.0 + # spring cloud + ZUUL_SERVER_VERSION: rel0.0.0 + #CMOA-1269 + EUREKA_SERVER_VERSION: rel0.0.0 + AUTH_SERVER_VERSION: rel0.0.0 + NOTI_SERVER_VERSION: rel0.0.0 + KAFKA_STREAM_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 + KUBE_INFO_FLAT_VERSION: rel0.0.0 + KUBE_INFO_BATCH_VERSION: rel0.0.0 + KUBE_INFO_CONNECTOR_VERSION: rel0.0.0 + + + CMOA_MANUAL_PORT: 31090 + + + # Keycloak + #KEYCLOAK_VERSION: v1.0.0 + + # 레지스트리 변수화 (Public Cloud 대비 / 아래 값 적절히 수정해서 사용할 것) + #IMXC_REGISTRY: 10.10.31.243:5000 + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + + # namespace 추가 + IMXC_NAMESPACE: imxc + + # ZUUL 8080으로 열어놓을것 + + CMOA_ES_ID: elastic + CMOA_ES_PW: elastic + + JDBC_KIND: 'postgres' + JDBC_SERVER: 'postgres:5432' + JDBC_DB: 'postgresdb' + JDBC_USER: 'admin' + JDBC_PWD: 'eorbahrhkswp' + + KAFKA_INPUT_TOPIC: 'kubernetes_info' + + TABLE_PREFIX: 'cmoa_' + BLACK_LIST: 'configmap_base,cronjob_active,endpoint_base,endpoint_addresses,endpoint_notreadyaddresses,endpoint_ports,event_base,node_image,persistentvolume_base,persistentvolumeclaim_base,pod_volume,resourcequota_base,resourcequota_scopeselector' + DELETE_HOUR: '15' + BACKLOGIN: false diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml 
b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml new file mode 100644 index 0000000..e94fc14 --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/cmoa-manual.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: manual + namespace: imxc +spec: + selector: + matchLabels: + app: manual + replicas: 1 + template: + metadata: + labels: + app: manual + spec: + containers: + - name: manual + image: {{ .Values.global.IMXC_IN_REGISTRY }}/manual:{{ .Values.global.CMOA_MANUAL_VERSION }} + imagePullPolicy: IfNotPresent + +--- +apiVersion: v1 +kind: Service +metadata: + name: manual + namespace: imxc +spec: + type: NodePort + selector: + app: manual + ports: + - protocol: TCP + port: 8088 + targetPort: 3000 + nodePort: {{ .Values.global.CMOA_MANUAL_PORT }} + diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! /bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + 
JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + 
"permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": "query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": 
"acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", + "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, 
+ { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": "view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": 
"view-applications", + "description": "${role_view-applications}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not 
specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], 
+ "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + 
"alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + 
"fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": 
"S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = 
Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": 
"openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": 
"oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": 
"String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": 
"email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + 
"protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + 
"attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + 
"oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } 
+ } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + 
"autheticatorFlow": false + } + ] + }, + { + "id": "484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is 
existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + 
"authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": 
false, + "autheticatorFlow": false + } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": 
"Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + 
"userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile 
config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + "config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml new file mode 100644 index 0000000..9fa97ed --- /dev/null +++ 
b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-config-jaeger.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config-jaeger + namespace: imxc +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + // Env Settings servletURL + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + demoServletURL: "{{ .Values.global.DEMO_SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings interMaxURL + interMaxURL: "http://{{ .Values.global.INTERMAX_IP }}:8080/intermax/?", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_UI_VERSION }}', + UI_build_ver: '{{ .Values.global.UI_SERVER_VERSION }}', + maxSelectionSize: 30, + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + healthIndicatorStateInfo: [ + { + state: "critical", + // max: 1.0, + // over: 0.8, + max: 100, + over: 80, + text: "Critical", + color: "#ff4040", + level: 4, + }, { + state: "warning", + // max: 0.8, + // over: 0.5, + max: 80, + over: 50, + text: "Warning", + color: "#ffa733", + level: 3, + }, { + state: "attention", + // max: 0.5, + // over: 0.0, + max: 50, + over: 
0, + text: "Attention", + // color: "#B4B83D", + color: "#1cbe85", + level: 2, + }, { + state: "normal", + max: 0, + over: 0, + text: "Normal", + // color: "#64B87D", + color: "#24b0ed", + level: 1, + }, + ] + }; + + diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml new file mode 100644 index 0000000..a0d959f --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/templates/imxc-ui-server-jaeger.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service-jaeger + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui-jaeger + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31084 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui-jaeger + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui-jaeger + template: + metadata: + labels: + app: imxc-ui-jaeger + spec: + containers: + - name: imxc-ui-jaeger + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config-jaeger + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config-jaeger diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml new file mode 100644 index 0000000..bd63730 --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jaeger/values.yaml @@ -0,0 +1,94 @@ 
+# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 111.111.111.111 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 111.111.111.111 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 111.111.111.111 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel0.0.0 + UI_SERVER_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml new file mode 100644 index 0000000..e2f559f --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: imxc +version: 0.1.0 diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh new file mode 100644 index 0000000..45b8f1e --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-api-server.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + chmod -R 777 /home/cloudmoa/notification/cloudmoa_alert.log + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh new file mode 100644 index 0000000..279b8a5 --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-auth-server.sh @@ -0,0 +1,36 @@ +#! /bin/bash + +# 200 -> 서버 및 realm이 있는 경우 +# 404 -> 서버는 있으나 realm이 없는 경우 +# 000 -> 서버가 없음 +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 404 ]; then + TOKEN="$(curl -s -d "client_id=admin-cli" -d "username=admin" -d "password=admin" -d "grant_type=password" http://imxc-keycloak-http/auth/realms/master/protocol/openid-connect/token | jq -r '.access_token')" + + echo $TOKEN + + echo "create realm and client" + # create realm and client + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "@/tmp/init.json" http://imxc-keycloak-http/auth/admin/realms + + + echo "create admin and owner" + # create admin and owner + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"admin","email":"admin@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + curl -s -v POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d '{"firstName":"","lastName":"", "username":"owner","email":"owner@example.com", "enabled":"true","credentials":[{"type":"password","value":"admin","temporary":false}]}' http://imxc-keycloak-http/auth/admin/realms/exem/users + + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 200 ]; then + echo "exist exem relam" + + 
JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +else + echo "not found keycloak. check to install keycloak" +fi diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh new file mode 100644 index 0000000..af73aed --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-noti-server.sh @@ -0,0 +1,14 @@ +#! /bin/sh + +STATUS_CODE="$(curl -s -o /dev/null -w '%{http_code}' http://imxc-keycloak-http/auth/realms/exem)" + +if [ $STATUS_CODE -eq 200 ]; then + JWT_KEY="$(curl -s -XGET http://imxc-keycloak-http/auth/realms/exem | jq -r '.public_key')" + export JWT_KEY + + java -Djava.security.egd=file:/dev/./urandom -jar /app.jar +elif [ $STATUS_CODE -eq 404 ]; then + echo "not found exem relam. check realm in imxc-keycloak" +else + echo "not found keycloak. 
check to install keycloak" +fi \ No newline at end of file diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh new file mode 100644 index 0000000..58db392 --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init-resource.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +chmod -R 777 /scripts + +sed -i "s/localhost/$REDIRECT_URLS/g" /scripts/init.json +cp /scripts/init.json /tmp/init.json \ No newline at end of file diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json new file mode 100644 index 0000000..dcd68b4 --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/scripts/init.json @@ -0,0 +1,2148 @@ +{ + "id": "exem", + "realm": "exem", + "notBefore": 0, + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "enabled": true, + "sslRequired": "none", + "registrationAllowed": false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + 
"maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "roles": { + "realm": [ + { + "id": "b361dcb8-4ec4-484e-a432-8d40a8ca5ac8", + "name": "offline_access", + "description": "${role_offline-access}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "621155f2-6c01-4e4a-bf11-47111503d696", + "name": "uma_authorization", + "description": "${role_uma_authorization}", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + }, + { + "id": "4aadd73a-e863-466a-932b-5bc81553fbf1", + "name": "access", + "composite": false, + "clientRole": false, + "containerId": "exem", + "attributes": {} + } + ], + "client": { + "realm-management": [ + { + "id": "e3eca547-c372-406a-abe7-30f554e13e63", + "name": "manage-realm", + "description": "${role_manage-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb1faff2-4cca-458c-b9da-96c1f6f5f647", + "name": "impersonation", + "description": "${role_impersonation}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "eb0f6ebb-8993-47f8-8979-2152ed92bf62", + "name": "create-client", + "description": "${role_create-client}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "29f0b39d-9cc9-4b40-ad81-00041897ae0c", + "name": "view-clients", + "description": "${role_view-clients}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-clients" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b6307563-9b35-4093-b0c4-a27df7cb82bd", + "name": 
"query-groups", + "description": "${role_query-groups}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "30091a91-f676-4e39-8ae2-ebfcee36c32a", + "name": "query-clients", + "description": "${role_query-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "b40ca071-2318-4f69-9664-f0dfe471d03b", + "name": "view-realm", + "description": "${role_view-realm}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "efd25ec7-e61f-4659-a772-907791aed58e", + "name": "view-authorization", + "description": "${role_view-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "4ad18bd0-f9a9-4fc7-8864-99afa71f95e4", + "name": "manage-users", + "description": "${role_manage-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a92c781f-7c6a-48d8-aa88-0b3aefb3c10c", + "name": "manage-events", + "description": "${role_manage-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "424933c1-3c03-49cd-955c-34aeeb0a3108", + "name": "manage-authorization", + "description": "${role_manage-authorization}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "5476db80-dbfa-408b-a934-5e8decc0af56", + "name": "manage-clients", + "description": "${role_manage-clients}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "acf53868-d09b-4865-92da-3b906307b979", + "name": "realm-admin", 
+ "description": "${role_realm-admin}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "manage-realm", + "impersonation", + "create-client", + "view-clients", + "query-groups", + "query-clients", + "view-realm", + "view-authorization", + "manage-users", + "manage-events", + "manage-authorization", + "manage-clients", + "query-users", + "query-realms", + "manage-identity-providers", + "view-users", + "view-events", + "view-identity-providers" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "f2ad5f83-ffde-4cf4-acc4-21f7bcec4c38", + "name": "query-users", + "description": "${role_query-users}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "96a017bf-5211-4c20-a1b2-7493bc45a3ad", + "name": "query-realms", + "description": "${role_query-realms}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "d8051d4d-f26c-4a6d-bcdd-b3d8111d9d29", + "name": "manage-identity-providers", + "description": "${role_manage-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "8c929b20-abc3-4b78-88f2-ed3348426667", + "name": "view-users", + "description": "${role_view-users}", + "composite": true, + "composites": { + "client": { + "realm-management": [ + "query-groups", + "query-users" + ] + } + }, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "a337a8f7-8725-4ff7-85fc-ecc4b5ce1433", + "name": "view-events", + "description": "${role_view-events}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + }, + { + "id": "649350cf-925c-4502-84b4-ec8415f956d3", + "name": 
"view-identity-providers", + "description": "${role_view-identity-providers}", + "composite": false, + "clientRole": true, + "containerId": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "attributes": {} + } + ], + "authorization_server": [ + { + "id": "2346ca49-eb3e-4f2e-b0ec-4def9ea9655c", + "name": "access", + "composite": false, + "clientRole": true, + "containerId": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "attributes": {} + } + ], + "security-admin-console": [], + "admin-cli": [], + "account-console": [], + "broker": [ + { + "id": "133ff901-3a8f-48df-893b-4c7e9047e829", + "name": "read-token", + "description": "${role_read-token}", + "composite": false, + "clientRole": true, + "containerId": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "attributes": {} + } + ], + "account": [ + { + "id": "89c5f56f-5845-400b-ac9f-942c46d082e0", + "name": "manage-account-links", + "description": "${role_manage-account-links}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "2cba7fed-0a80-4dbd-bd2d-abfa2c6a985e", + "name": "view-profile", + "description": "${role_view-profile}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "f446a93d-143f-4071-9bdc-08aa2fdce6d2", + "name": "view-consent", + "description": "${role_view-consent}", + "composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "ef3364db-e008-4aec-9e74-04bac25cbe40", + "name": "manage-consent", + "description": "${role_manage-consent}", + "composite": true, + "composites": { + "client": { + "account": [ + "view-consent" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "96afbe32-3ac2-4345-bc17-06cf0e8de0b4", + "name": "view-applications", + "description": "${role_view-applications}", + 
"composite": false, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + }, + { + "id": "cf6861ca-4804-40d4-9016-c48e7ebf1c72", + "name": "manage-account", + "description": "${role_manage-account}", + "composite": true, + "composites": { + "client": { + "account": [ + "manage-account-links" + ] + } + }, + "clientRole": true, + "containerId": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "attributes": {} + } + ] + } + }, + "groups": [ + { + "id": "8d3f7332-7f72-47e2-9cb3-38331f0c29b5", + "name": "DEFAULT_TENANT", + "path": "/DEFAULT_TENANT", + "attributes": {}, + "realmRoles": [], + "clientRoles": {}, + "subGroups": [] + } + ], + "defaultRoles": [ + "offline_access", + "uma_authorization" + ], + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpSupportedApplications": [ + "FreeOTP", + "Google Authenticator" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not 
specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account" + ] + } + ] + }, + "clients": [ + { + "id": "8c29b723-b140-4904-87d9-9b22627f9ff3", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "defaultRoles": [ + "view-profile", + "manage-account" + ], + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1e3d0c5d-c456-4c5f-93cf-58236273186a", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/exem/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/exem/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + 
"consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "cceae7c8-fa8d-48eb-a0a6-6013a2cc771e", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "d4d3e5a5-584c-4aff-a79f-ac3c31ace5a1", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "b9bbda1f-a756-4b72-9cd8-06a6dfd6d5bf", + "clientId": "authorization_server", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": 
"client-secret", + "secret": "**********", + "redirectUris": [ + "localhost" + ], + "webOrigins": [ + "*" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "saml.server.signature": "false", + "saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "fdc71d6d-db86-414f-bd80-ed1f5e9a6975", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + 
"defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "73540f9a-bbb0-4c72-afbd-2a69586191e8", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": {}, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "bb6c56f1-126e-4356-9579-d95992a8d150", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/exem/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/exem/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + 
"fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "3cf06cab-00dd-486b-8e72-1a453a7031ca", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "role_list", + "roles", + "profile", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6a21eaaa-69c9-4519-8732-2155865a1891", + "name": "custom_jwt", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "fd7557f5-3174-4c65-8cd1-0e9f015a906f", + "name": "customizingJWT", + "protocol": "openid-connect", + "protocolMapper": "oidc-script-based-protocol-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "multivalued": "true", + "id.token.claim": "false", + "access.token.claim": "true", + "jsonType.label": "String", + "script": "/**\r\n * Available variables: \r\n * user - the current user\r\n * realm - the current realm\r\n * token - the current token\r\n * userSession - the current userSession\r\n * keycloakSession - the current keycloakSession\r\n */\r\n\r\n//insert your code here...\r\n\r\n// you can set standard fields in token - test code\r\n// token.setAcr(\"test value\");\r\n\r\n// you can set claims in the token - test code\r\n// token.getOtherClaims().put(\"claimName\", \"claim value\");\r\n\r\n// work with variables and return multivalued token value\r\nvar ArrayList = Java.type(\"java.util.ArrayList\");\r\nvar HashMap = Java.type(\"java.util.HashMap\");\r\nvar tenantInfoMap = new HashMap();\r\nvar 
tenantIpMap = new HashMap();\r\n\r\nvar forEach = Array.prototype.forEach;\r\n\r\nvar client = keycloakSession.getContext().getClient();\r\nvar groups = user.getGroups();\r\nvar clientRole = client.getRole(\"access\");\r\n\r\nforEach.call(groups.toArray(), function(group) {\r\n if(group.hasRole(clientRole)) {\r\n tenantIpMap.put(group.getName(), clientRole.getAttribute(\"ip\"));\r\n tenantInfoMap.put(group.getName(), group.getAttributes());\r\n }\r\n});\r\n\r\ntoken.setOtherClaims(\"tenantInfo\", tenantInfoMap);\r\n" + } + }, + { + "id": "2cb34189-9f06-4b9f-b066-c28e7930f0a5", + "name": "custom_phone", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "phone", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.phone", + "jsonType.label": "String" + } + }, + { + "id": "6bcb0aa9-8713-4e4b-b997-2e08d2dda0f4", + "name": "group_attr", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "groups", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "groups.attributes", + "jsonType.label": "String" + } + }, + { + "id": "03deb40b-4f83-436e-9eab-f479eed62460", + "name": "custom_name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "false", + "user.attribute": "name", + "id.token.claim": "false", + "access.token.claim": "true", + "claim.name": "attributes.name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "9fed7d81-3f42-41b0-b661-7875abb90b2b", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + 
"display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "d030d675-2c31-401a-a461-534211b3d2ec", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + }, + { + "id": "ca2026a0-84de-4b8d-bf0c-35f3d088b115", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "cf3e7fce-e9e8-40dc-bd0d-5cf7bac861c0", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "6b909bad-30d8-4095-a80b-d71589e8a0b4", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "73231863-d614-4725-9707-f5704c70893a", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "fad2c0b3-d6d6-46c9-b8a5-70cf2f3cd69e", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + 
"user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String" + } + }, + { + "id": "1fa51f0e-8fa8-4807-a381-c9756ce1d2ff", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "8be191ba-c7b8-45f1-a37f-2830595d4b54", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "93a4b53a-a281-4203-a070-0ad31e719b29", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "c716d4df-ad16-4a47-aa05-ded2a69313a3", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "db0fcb5b-bad6-42b7-8ab0-b90225100b8a", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "f1723d4c-6d93-40be-b5b8-5ca7083e55c7", + "name": "address", + 
"description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "9e95dff0-dc01-4efe-a414-21c83d94491c", + "name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "16524b43-6bfc-4e05-868c-682e7e1e611c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "4444c30e-5da5-46e6-a201-64c28ab26e10", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + }, + { + "id": "0faa8ba7-6d4d-4ed4-ab89-334e1d18b503", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "4ccced80-99d8-4081-8d1d-37ed6d5aaf34", + 
"name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "02aea132-f5e1-483c-968a-5fbb9cdfb82d", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "eb5d10fc-d4a8-473a-ac3e-35f3fb0f41bb", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "2467b8e5-f340-45a2-abff-c658eccf3ed3", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "50a9bb17-af12-481d-95dd-6aed1dd4bf56", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "80a65208-9425-4e66-b769-98c2f1c91e6e", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + 
"userinfo.token.claim": "true", + "user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "68a750c6-b4b8-47f4-a919-752319e63213", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "e27abd0e-72c1-40de-a678-e9e4e2db8e7f", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + }, + { + "id": "04f3fa01-6a4c-44eb-bfd8-0a0e1c31bc4a", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "94e697d9-fbee-48d8-91d1-7bbc4f1fb44e", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "a2f05d76-947d-4ceb-969b-1b923be9a923", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + 
"userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + "jsonType.label": "String" + } + }, + { + "id": "1966f863-ac5c-4cbc-a156-d5bd861728f0", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "18a9b452-cd8e-4c43-a9a8-0ea532074f74", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1583790a-ec7a-4899-a901-60e23fd0d969", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "7094b64a-492b-4f31-aa73-bb19d06ddb56", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "eff18c11-eaf4-4d6a-8365-90f646ea3cc5", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + 
"display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "3bb12700-3e6f-4a73-bfbb-cfd16a8ab007", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + "attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "e83e35b7-9650-4f7e-b182-65c184d261b3", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "role_list", + "profile", + "email", + "roles", + "web-origins", + "custom_jwt" + ], + "defaultOptionalClientScopes": [ + "offline_access", + "address", + "phone", + "microprofile-jwt" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "eventsEnabled": false, + "eventsListeners": [ + "jboss-logging" + ], + "enabledEventTypes": [], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "9b1dcf02-e9ec-4302-8aad-28f3250d1b2d", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + 
"saml-user-property-mapper" + ] + } + }, + { + "id": "752137ea-bc3a-46c3-9d83-49cb370d39a9", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "f365d31f-ccc5-4e57-97bd-b2749b1ab5e5", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "52e385fd-3aa5-442d-b5e4-6ff659126196", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-full-name-mapper", + "oidc-usermodel-attribute-mapper", + "oidc-address-mapper", + "oidc-usermodel-property-mapper", + "saml-user-property-mapper", + "saml-role-list-mapper" + ] + } + }, + { + "id": "dbebbc9d-1b14-4d09-906c-b4e5638f9588", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "b3fc18dc-467f-4240-9b6d-f07df5c40aee", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "19e102da-1d66-4747-958b-9311e5156693", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "66e83112-7392-46cb-bbd5-b71586183ada", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": 
"a60adc1b-3f6b-40d4-901f-d4f744f0d71b", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "bc1b25d8-b199-4d87-b606-6cde0f6eafb0", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "fe624aa7-54a3-43d8-b2a3-f74b543a9225", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "a837df3e-15cb-4d2a-8ce0-5eea5c704e76", + "alias": "Account verification options", + "description": "Method with which to verity the existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "59026e13-e2bd-4977-a868-505ea562f545", + "alias": "Authentication Options", + "description": "Authentication options.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "basic-auth", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "basic-auth-otp", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": 
"484d422c-d9b4-4c0e-86d5-60463ecd24c9", + "alias": "Browser - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "0ec05058-6d09-4951-a116-19e8810e5d8e", + "alias": "Direct Grant - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "667c03cd-114c-4d9a-a7fa-7d2c27f10722", + "alias": "First broker login - Conditional OTP", + "description": "Flow to determine if the OTP is required for the authentication", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-otp-form", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "1510fbf7-239f-44aa-9955-72d42f6d99fd", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like 
authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Account verification options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "5622e71d-e1f4-4711-a425-a8470d0a017e", + "alias": "Reset - Conditional OTP", + "description": "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-otp", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "09dfe405-5ef6-4940-8885-5adf867a74c8", + "alias": "User creation or linking", + "description": "Flow for the existing/non-existing user alternatives", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 20, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "a3eb6b61-1943-4fb7-9b2f-137826882662", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + 
"authenticator": "idp-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "First broker login - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "7f5e2f68-84bc-4703-b474-e3b092621195", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "auth-spnego", + "requirement": "DISABLED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "identity-provider-redirector", + "requirement": "ALTERNATIVE", + "priority": 25, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "ALTERNATIVE", + "priority": 30, + "flowAlias": "forms", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "224cc520-37f7-445e-ab1f-7ba547a45a0d", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "requirement": "ALTERNATIVE", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-jwt", + "requirement": "ALTERNATIVE", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-secret-jwt", + "requirement": "ALTERNATIVE", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "client-x509", + "requirement": "ALTERNATIVE", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": false 
+ } + ] + }, + { + "id": "2e58184b-529b-450c-9731-29763d26b087", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "direct-grant-validate-password", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 30, + "flowAlias": "Direct Grant - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "c969ac8c-e7d8-44b5-ad4d-5fcb80514eac", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "docker-http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "de2259a4-7f92-42ec-994c-f55d8cba3b59", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "User creation or linking", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "6c2745d2-be21-4f3c-a291-5b3fc039432a", + "alias": "forms", + "description": "Username, password, otp and other 
auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 20, + "flowAlias": "Browser - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "ac8f5082-3fd0-47c5-854d-0dd9c3951668", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "REQUIRED", + "priority": 20, + "flowAlias": "Authentication Options", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "32030b4b-c82b-4c1a-a692-3b51eae74bbc", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "requirement": "REQUIRED", + "priority": 10, + "flowAlias": "registration form", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "b99fca4c-386c-4277-acc1-83e57e29244d", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-profile-action", + "requirement": "REQUIRED", + "priority": 40, + "userSetupAllowed": false, + "autheticatorFlow": 
false + }, + { + "authenticator": "registration-password-action", + "requirement": "REQUIRED", + "priority": 50, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "registration-recaptcha-action", + "requirement": "DISABLED", + "priority": 60, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + }, + { + "id": "5edbc053-816a-434e-9866-6c0cc7e49f89", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-credential-email", + "requirement": "REQUIRED", + "priority": 20, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "authenticator": "reset-password", + "requirement": "REQUIRED", + "priority": 30, + "userSetupAllowed": false, + "autheticatorFlow": false + }, + { + "requirement": "CONDITIONAL", + "priority": 40, + "flowAlias": "Reset - Conditional OTP", + "userSetupAllowed": false, + "autheticatorFlow": true + } + ] + }, + { + "id": "460782e7-9644-4a34-8024-cb428cbe3991", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "requirement": "REQUIRED", + "priority": 10, + "userSetupAllowed": false, + "autheticatorFlow": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "67af6e65-853c-4bfd-9eef-72e735691377", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "af6c6e01-772d-426a-bdd3-3ebc95537bcd", + "alias": "review profile config", + "config": { + 
"update.profile.on.first.login": "missing" + } + } + ], + "requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + "config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "clientOfflineSessionMaxLifespan": "0", + "clientSessionIdleTimeout": "0", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0" + }, + "keycloakVersion": "11.0.1", + "userManagedAccessAllowed": false +} \ No newline at end of file diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml new file mode 100644 index 0000000..e47ff66 --- /dev/null +++ 
b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-config.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: imxc-ui-config + namespace: imxc + +data: + properties.file: | + api.url = {{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }} + config.js: | + window.appEnv = { + offlineAccess: "{{ .Values.global.OFFLINEACCESS }}", + backLogin: "{{ .Values.global.BACKLOGIN }}", + // Env Settings servletURL + servletURL: "{{ .Values.global.SERVELET_URL_PROTOCOL }}://{{ .Values.global.ZUUL_SERVER_IP }}:{{ .Values.global.ZUUL_SERVER_PORT }}", + // Env Settings socketURL + socketURL: "http://{{ .Values.global.NOTI_SERVER_IP }}:{{ .Values.global.NOTI_SERVER_PORT }}/ui-server-websocket", + // Env Settings interMaxURL + // ex) ~/intermax/?paConnect=1&paType=ResponseInspector&fromTime=1556096539206&toTime=1556096599206&serverName=jeus89 + interMaxURL: "", + manualURL: "http://{{ .Values.global.CMOA_MANUAL_SERVER_IP }}:{{ .Values.global.CMOA_MANUAL_PORT }}", + // Env Settings CloudMOA Version + version: '{{ .Values.global.CLOUDMOA_VERSION }}', + loginType: 'keycloak', + keyCloak: { + "realm": "{{ .Values.global.KEYCLOAK_REALM }}", + "auth-server-url": "{{ .Values.global.KEYCLOAK_AUTH_SERVER_URL }}", + "ssl-required": "none", + "resource": "{{ .Values.global.KEYCLOAK_RESOURCE }}", + "public-client": true, + "confidential-port": 0 + }, + // refreshTime: '4', // 리로드 주기 설정 4로 설정시 새벽 4시에 리로드 하게 됨 + intervalTime: { // 5의 배수여야만 함 + short: 5, + medium: 10, + long: 60, + }, + // excludedContents: { + // anomalyScoreSettings: true, // entity black list setting page + // anomalyScoreInSidebar: true, // anomaly score in side bar + // }, + serviceTraceAgentType: 'jspd' + }; diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml new file mode 100644 index 0000000..35c4b61 
--- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/templates/imxc-ui-server.yaml @@ -0,0 +1,63 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: imxc-ui-service + namespace: imxc +spec: + type: NodePort + selector: + app: imxc-ui + ports: + - protocol: TCP + name: ui + port: 80 + targetPort: 9999 + nodePort: 31080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: imxc-ui + namespace: imxc + labels: + app: imxc-ui +spec: + revisionHistoryLimit: 0 + replicas: 1 + selector: + matchLabels: + app: imxc-ui + template: + metadata: + labels: + app: imxc-ui + spec: + containers: + - name: imxc-ui + image: {{ .Values.global.IMXC_IN_REGISTRY }}/ui-server:{{ .Values.global.UI_SERVER_VERSION }} + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 200m + memory: 100Mi + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - name: config-profile + mountPath: /usr/src/app/web/env + - name: config-server + mountPath: /usr/src/app/config + volumes: + - name: config-profile + configMap: + name: imxc-ui-config + items: + - key: "config.js" + path: "config.js" + - name: config-server + configMap: + name: imxc-ui-config diff --git a/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml new file mode 100644 index 0000000..bd63730 --- /dev/null +++ b/roles/cmoa_install/files/06-imxc-ui/imxc-ui-jspd/values.yaml @@ -0,0 +1,94 @@ +# Default values for imxc. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: 10.10.31.243:5000/cmoa3/nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + INTERMAX_IP: + SERVELET_URL_PROTOCOL : http + DEMO_SERVELET_URL_PROTOCOL : http + KEYCLOAK_AUTH_SERVER_URL: http://111.111.111.111:31082/auth + KEYCLOAK_RESOURCE: authorization_server + KEYCLOAK_REALM: exem + + IMXC_IN_REGISTRY: 10.10.31.243:5000/cmoa3 + + ZUUL_SERVER_IP: 111.111.111.111 + ZUUL_SERVER_PORT: 31081 + + NOTI_SERVER_IP: 111.111.111.111 + NOTI_SERVER_PORT: 31083 + + CMOA_MANUAL_SERVER_IP: 111.111.111.111 + CMOA_MANUAL_PORT: 31090 + + OFFLINEACCESS: false + BACKLOGIN: false + + CLOUDMOA_VERSION: rel0.0.0 + UI_SERVER_VERSION: rel0.0.0 + CMOA_MANUAL_VERSION: rel0.0.0 diff --git a/roles/cmoa_install/files/ip_change b/roles/cmoa_install/files/ip_change new file mode 100755 index 0000000..ac13cc7 --- /dev/null +++ b/roles/cmoa_install/files/ip_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_ip=$1 +after_ip=$2 
+grep_path=$3 + +if [[ $before_ip == '' || $after_ip == '' ]]; then + echo '[Usage] $0 {before_ip} {after_ip}' + exit +fi + +grep -rn ${before_ip} ${grep_path} | awk -F':' {'print $1'} | uniq | /usr/bin/xargs sed -i "s/${before_ip}/${after_ip}/g" + +echo "success" \ No newline at end of file diff --git a/roles/cmoa_install/files/k8s_status b/roles/cmoa_install/files/k8s_status new file mode 100755 index 0000000..16b3c61 --- /dev/null +++ b/roles/cmoa_install/files/k8s_status @@ -0,0 +1,86 @@ +#! /usr/bin/python3 +#-*- coding:utf-8 -*- + +import os, sys, subprocess, io, time +from kubernetes import client, config +def debug_print(msg): + print(" # ", msg) + +def k8s_conn(KUBE_CONFIG_PATH): + config.load_kube_config( + config_file=KUBE_CONFIG_PATH + ) + k8s_api = client.CoreV1Api() + + return k8s_api + +def k8s_get_pod(k8s_api, namespace, target=''): + pretty=False + watch=False + timeout_seconds=30 + api_response = k8s_api.list_namespaced_pod(namespace, pretty=pretty, timeout_seconds=timeout_seconds, watch=watch) + pod_list=[] + for pod in api_response.items: + status = pod.status.phase + #container_status = pod.status.container_statuses[0] + #if container_status.started is False or container_status.ready is False: + # waiting_state = container_status.state.waiting + # if waiting_state.message is not None and 'Error' in waiting_state.message: + # status = waiting_state.reason + if target != '': + if target in pod.metadata.name: + return (pod.metadata.name + " " + status) + pod_list.append(pod.metadata.name+" "+status) + return pod_list + +def k8s_pod_status_check(k8s_api, waiting_time, namespace,except_pod=False): + num=0 + while True: + num+=1 + resp=k8s_get_pod(k8s_api, namespace) + all_run_flag=True + if debug_mode: + debug_print('-'*30) + debug_print('pod 상태 체크시도 : {} ({}s)'.format(num, waiting_time)) + debug_print('-'*30) + for i in resp: + if except_pod: + if except_pod in i.lower(): continue + if 'pending' in i.lower(): + all_run_flag=False + result='{} 결과: 
{}'.format(i, all_run_flag) + debug_print(result) + if all_run_flag: + if debug_mode: + debug_print('-'*30) + debug_print('[{}] pod All Running'.format(namespace)) + debug_print('-'*30) + for i in resp: debug_print(i) + break + else: time.sleep(int(waiting_time)) + +def main(): + namespace = os.sys.argv[1] + + try: + Except_k8s_pod = os.sys.argv[2] + except: + Except_k8s_pod = '' + + try: + KUBE_CONFIG_PATH = os.sys.argv[3] + os.environ["KUBECONFIG"]=KUBE_CONFIG_PATH + except: + KUBE_CONFIG_PATH = os.environ["KUBECONFIG"] + + k8s_api=k8s_conn(KUBE_CONFIG_PATH) + k8s_pod_status_check(k8s_api, 60, namespace, Except_k8s_pod) + + +if __name__ == "__main__": + try: + debug_mode=False + main() + except Exception as err: + print("[Usage] k8s_status {namespace} {Except_pod=(default=false)} {KUBECONFIG_PATH=(default=current env)}") + print(err) diff --git a/roles/cmoa_install/files/postgres_check_data b/roles/cmoa_install/files/postgres_check_data new file mode 100755 index 0000000..d377aeb --- /dev/null +++ b/roles/cmoa_install/files/postgres_check_data @@ -0,0 +1,6 @@ +#!/bin/bash + +namespace=$1 +pg_pod=`kubectl -n ${namespace} get pod --no-headers | awk '{print $1}' | grep postgres` +kubectl_cmd="kubectl -n ${namespace} exec -it ${pg_pod} --" +${kubectl_cmd} bash -c "echo \"select count(*) from pg_database where datname='keycloak';\" | /usr/bin/psql -U postgres | egrep -iv '(count|---|row)' | tr -d ' ' | tr -d '\n'" \ No newline at end of file diff --git a/roles/cmoa_install/files/rel_change b/roles/cmoa_install/files/rel_change new file mode 100755 index 0000000..ae1f6b3 --- /dev/null +++ b/roles/cmoa_install/files/rel_change @@ -0,0 +1,15 @@ +#!/bin/bash +if [ -z "$BASH_VERSION" ]; then exec bash "$0" "$@"; exit; fi + +before_version=$1 +after_version=$2 +grep_path=$3 + +if [[ $before_version == '' || $after_version == '' ]]; then + echo '[Usage] $0 {before_version} {after_version}' + exit +fi + +grep -rn ${before_version} ${grep_path} | awk -F':' {'print $1'} | uniq 
| /usr/bin/xargs sed -i "s/${before_version}/${after_version}/g" + +echo "success" \ No newline at end of file diff --git a/roles/cmoa_install/tasks/00-default-settings-master.yml b/roles/cmoa_install/tasks/00-default-settings-master.yml new file mode 100644 index 0000000..4a17c4a --- /dev/null +++ b/roles/cmoa_install/tasks/00-default-settings-master.yml @@ -0,0 +1,30 @@ +--- +- name: 1. Create a cmoa namespace + kubernetes.core.k8s: + name: "{{ cmoa_namespace }}" + api_version: v1 + kind: Namespace + state: present + +- name: 2. Create secret + kubernetes.core.k8s: + state: present + namespace: "{{ item }}" + src: "{{ role_path }}/files/00-default/secret_nexus.yaml" + apply: yes + with_items: + - "{{ cmoa_namespace }}" + - default + +- name: 3. kubeconfig check + shell: "echo $KUBECONFIG" + register: kubeconfig + +- name: 4. Patch default sa + shell: "{{ role_path }}/files/00-default/sa_patch.sh {{ kubeconfig.stdout }}" + +- name: 5. Master IP Setting + command: "{{ role_path }}/files/ip_change {{ before_ip }} {{ ansible_default_ipv4.address }} {{ role_path }}/files" + +- name: 6. CloudMOA Version Change + command: "{{ role_path }}/files/rel_change {{ before_version }} {{ cmoa_version }} {{ role_path }}/files" diff --git a/roles/cmoa_install/tasks/00-default-settings-node.yml b/roles/cmoa_install/tasks/00-default-settings-node.yml new file mode 100644 index 0000000..a568b74 --- /dev/null +++ b/roles/cmoa_install/tasks/00-default-settings-node.yml @@ -0,0 +1,27 @@ +--- +- name: 1. Node add Label (worker1) + kubernetes.core.k8s: + apply: yes + definition: + apiVersion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker1 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker1 + +- name: 2. 
Node add Label (worker2) + kubernetes.core.k8s: + definition: + apiVersion: v1 + kind: Node + metadata: + name: "{{ item }}" + labels: + cmoa: worker2 + with_items: + - "{{ ansible_hostname }}" + when: ansible_default_ipv4.address in groups.worker2 \ No newline at end of file diff --git a/roles/cmoa_install/tasks/01-storage-install.yml b/roles/cmoa_install/tasks/01-storage-install.yml new file mode 100644 index 0000000..bef58ef --- /dev/null +++ b/roles/cmoa_install/tasks/01-storage-install.yml @@ -0,0 +1,45 @@ +--- +- name: 1. yaml file install (sc, pv) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/01-storage/{{ item }}" + apply: yes + with_items: + - 00-storageclass.yaml + - 01-persistentvolume.yaml + +- name: 2. helmchart install (minio) + kubernetes.core.helm: + name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/01-storage/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/01-storage/{{item}}/values.yaml" + with_items: + - minio + +- name: 3. Change a Minio Api Service (NodePort=minio_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ minio_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ minio_service_port }}" + nodePort: "{{ minio_nodePort }}" + apply: yes + +- name: 4. Check Kubernetes Pods (minio) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 5. 
minio setting (minio) + command: "{{ role_path }}/files/01-storage/cmoa_minio {{ ansible_default_ipv4.address }}:{{ minio_nodePort }} {{ minio_user }} {{ bucket_name }} {{ days }} {{ rule_id }}" \ No newline at end of file diff --git a/roles/cmoa_install/tasks/02-base-install.yml b/roles/cmoa_install/tasks/02-base-install.yml new file mode 100644 index 0000000..f7924a6 --- /dev/null +++ b/roles/cmoa_install/tasks/02-base-install.yml @@ -0,0 +1,51 @@ +--- +- name: 1. kafka broker config apply (base) + kubernetes.core.k8s: + state: present + namespace: "{{ cmoa_namespace }}" + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 00-kafka-broker-config.yaml + +- name: 2. coredns config apply (base) + kubernetes.core.k8s: + state: present + namespace: default + src: "{{ role_path }}/files/02-base/{{ item }}" + apply: yes + with_items: + - 01-coredns.yaml + +- name: 3. helmchart install (base) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/02-base/{{item}}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/02-base/{{item}}/values.yaml" + with_items: + - base + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 5. Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/roles/cmoa_install/tasks/03-ddl-dml.yml b/roles/cmoa_install/tasks/03-ddl-dml.yml new file mode 100644 index 0000000..be5af75 --- /dev/null +++ b/roles/cmoa_install/tasks/03-ddl-dml.yml @@ -0,0 +1,59 @@ +- name: 1. 
Check Postgres DB Data + command: "{{ role_path }}/files/postgres_check_data {{ cmoa_namespace }}" + register: pg_check_result + +- name: 2. Insert Elasticsearch template + command: "sh {{ role_path }}/files/03-ddl-dml/elasticsearch/es-ddl-put.sh {{ cmoa_namespace }}" + +- name: 2.1. Elasticsearch dependency deploy restart + command: "kubectl -n {{ cmoa_namespace }} rollout restart deploy alertmanager base-cortex-configs base-cortex-distributor base-cortex-ruler" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + +- name: 2.2. Check Kubernetes Pods (Elasticsearch dependency) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }} alertmanage" + +- name: 3. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" + register: pod_list + when: pg_check_result.stdout != '1' + +- name: 4. Copy psql file in postgres (DDL) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_ddl.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_ddl.psql" + when: item is match('postgres') and pg_check_result.stdout != '1' + with_items: "{{ pod_list.stdout_lines }}" + ignore_errors: true + +- name: 5. Execute a command in postgres (DDL) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_ddl.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 6. 
Copy psql file in postgres (DML) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/postgres_insert_dml.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true + +- name: 7. Execute a command in postgres (DML) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/postgres_insert_dml.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') + ignore_errors: true \ No newline at end of file diff --git a/roles/cmoa_install/tasks/04-keycloak-install.yml b/roles/cmoa_install/tasks/04-keycloak-install.yml new file mode 100644 index 0000000..de5fc9c --- /dev/null +++ b/roles/cmoa_install/tasks/04-keycloak-install.yml @@ -0,0 +1,34 @@ +--- +- name: 1. helmchart install (keycloak) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/04-keycloak" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/04-keycloak/values.yaml" + with_items: + - keycloak + +- name: 4. Check Kubernetes Pods (base) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + + +- name: 5. 
Change a Elasticsearch Service (NodePort=elasticsearch_nodePort) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ elasticsearch_service_name }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ elasticsearch_service_port }}" + nodePort: "{{ elasticsearch_nodePort }}" + apply: yes diff --git a/roles/cmoa_install/tasks/05-imxc-install.yml b/roles/cmoa_install/tasks/05-imxc-install.yml new file mode 100644 index 0000000..420d2d1 --- /dev/null +++ b/roles/cmoa_install/tasks/05-imxc-install.yml @@ -0,0 +1,16 @@ +--- +- name: 1. helmchart install (imxc) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/05-imxc" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/05-imxc/values.yaml" + with_items: + - imxc + +- name: 2. Check Kubernetes Pods (imxc / keycloak) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/roles/cmoa_install/tasks/06-imxc-ui-install.yml b/roles/cmoa_install/tasks/06-imxc-ui-install.yml new file mode 100644 index 0000000..7da82a1 --- /dev/null +++ b/roles/cmoa_install/tasks/06-imxc-ui-install.yml @@ -0,0 +1,112 @@ +--- +- name: 1. helmchart install (imxc-ui-all) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + - imxc-ui-jspd + when: imxc_ui == 'all' + +- name: 1. 
helmchart install (imxc-ui-jaeger) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jaeger + when: imxc_ui == 'jaeger' + +- name: 2. Change a imxc-ui Service (imxc-ui-jaeger) + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: "{{ jaeger_servicename }}" + namespace: "{{ cmoa_namespace }}" + spec: + type: NodePort + ports: + - protocol: TCP + port: "{{ jaeger_service_port }}" + nodePort: "{{ jaeger_nodePort }}" + apply: yes + when: imxc_ui == 'jaeger' + +- name: 2. Get a list of all pods from the namespace + command: kubectl -n "{{ cmoa_namespace }}" get pods --no-headers -o custom-columns=":metadata.name" # Output is a column + register: pod_list + when: imxc_ui != 'all' + +- name: 3. Copy psql file in psql (imxc-jaeger) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jaeger_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 4. Execute a command in psql (imxc-jaeger) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jaeger_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: + - item is match('postgres') + - imxc_ui == 'jaeger' + ignore_errors: true + +- name: 1. 
helmchart install (imxc-ui-jspd) + kubernetes.core.helm: + name: "{{item}}" + release_name: "{{item}}" + release_namespace: "{{ cmoa_namespace }}" + chart_ref: "{{ role_path }}/files/06-imxc-ui/{{ item }}" + create_namespace: yes + release_state: present + values_files: + - "{{ role_path }}/files/06-imxc-ui/{{ item }}/values.yaml" + with_items: + - imxc-ui-jspd + when: imxc_ui == 'jspd' + ignore_errors: true + +- name: 3. Copy psql file in postgres (imxc-ui-jspd) + kubernetes.core.k8s_cp: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + remote_path: /tmp/jspd_menumeta.psql + local_path: "{{ role_path }}/files/03-ddl-dml/postgres/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 4. Execute a command in postgres (imxc-ui-jspd) + kubernetes.core.k8s_exec: + namespace: "{{ cmoa_namespace }}" + pod: "{{ item }}" + command: bash -c "PGPASSWORD='eorbahrhkswp' && /usr/bin/psql -h 'localhost' -U 'admin' -d 'postgresdb' -f /tmp/jspd_menumeta.psql" + with_items: "{{ pod_list.stdout_lines }}" + when: item is match('postgres') and imxc_ui == 'jspd' + ignore_errors: true + +- name: 2. Check Kubernetes Pods (imxc ui) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" diff --git a/roles/cmoa_install/tasks/07-keycloak-setting.yml b/roles/cmoa_install/tasks/07-keycloak-setting.yml new file mode 100644 index 0000000..8e90b79 --- /dev/null +++ b/roles/cmoa_install/tasks/07-keycloak-setting.yml @@ -0,0 +1,90 @@ +--- +- name: 0. 
Generate keycloak auth token + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/realms/master/protocol/openid-connect/token" + method: POST + body: "client_id={{ keycloak_auth_client }}&username={{ keycloak_admin_user }}&password={{ keycloak_admin_password }}&grant_type=password" + validate_certs: no + register: keycloak_auth_response + until: keycloak_auth_response.status == 200 + retries: 5 + delay: 2 + +- name: 1. Determine if realm exists + ansible.builtin.uri: + url: "{{ keycloak_url }}{{ keycloak_context }}/admin/realms/{{ keycloak_realm }}" + method: GET + status_code: + - 200 + - 404 + headers: + Accept: "application/json" + Authorization: "Bearer {{ keycloak_auth_response.json.access_token }}" + register: keycloak_realm_exists + + +- name: 2. update a keycloak realm + community.general.keycloak_realm: + auth_client_id: "{{ keycloak_auth_client }}" + auth_keycloak_url: "{{ keycloak_url }}{{ keycloak_context }}" + auth_realm: "{{ keycloak_auth_realm }}" + auth_username: "{{ keycloak_admin_user }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ item.realm }}" + login_theme: "{{ keycloak_login_theme }}" + loop: "{{ keycloak_clients | flatten }}" + +- name: 3. Validate Keycloak clients + ansible.builtin.assert: + that: + - item.name is defined and item.name | length > 0 + - (item.client_id is defined and item.client_id | length > 0) or (item.id is defined and item.id | length > 0) + fail_msg: "For each keycloak client, attributes `name` and either `id` or `client_id` is required" + quiet: True + loop: "{{ keycloak_clients | flatten }}" + loop_control: + label: "{{ item.name | default('unnamed client') }}" + + +- name: 4. 
update a Keycloak client + community.general.keycloak_client: + auth_client_id: "{{ keycloak_auth_client }}" + auth_keycloak_url: "{{ keycloak_url }}{{ keycloak_context }}" + auth_realm: "{{ keycloak_auth_realm }}" + auth_username: "{{ keycloak_admin_user }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ item.realm }}" + default_roles: "{{ item.roles | default(omit) }}" + client_id: "{{ item.client_id | default(omit) }}" + id: "{{ item.id | default(omit) }}" + name: "{{ item.name | default(omit) }}" + description: "{{ item.description | default(omit) }}" + root_url: "{{ item.root_url | default('') }}" + admin_url: "{{ item.admin_url | default('') }}" + base_url: "{{ item.base_url | default('') }}" + enabled: "{{ item.enabled | default(True) }}" + redirect_uris: "{{ item.redirect_uris | default(omit) }}" + web_origins: "{{ item.web_origins | default('+') }}" + bearer_only: "{{ item.bearer_only | default(omit) }}" + standard_flow_enabled: "{{ item.standard_flow_enabled | default(omit) }}" + implicit_flow_enabled: "{{ item.implicit_flow_enabled | default(omit) }}" + direct_access_grants_enabled: "{{ item.direct_access_grants_enabled | default(omit) }}" + service_accounts_enabled: "{{ item.service_accounts_enabled | default(omit) }}" + public_client: "{{ item.public_client | default(False) }}" + protocol: "{{ item.protocol | default(omit) }}" + state: present + register: create_client_result + loop: "{{ keycloak_clients | flatten }}" + when: (item.name is defined and item.client_id is defined) or (item.name is defined and item.id is defined) + +- name: 5. Dependency deploy scale down + command: "kubectl -n {{ cmoa_namespace }} scale --replicas=0 deploy imxc-api noti-server auth-server zuul-deployment" + +- name: 6. 
Dependency deploy scale up + command: "kubectl -n {{ cmoa_namespace }} scale --replicas=1 deploy imxc-api noti-server auth-server zuul-deployment" + register: restart + +- debug: + msg: "{{restart.stdout_lines}}" + + diff --git a/roles/cmoa_install/tasks/08-finish.yml b/roles/cmoa_install/tasks/08-finish.yml new file mode 100644 index 0000000..4fd19f4 --- /dev/null +++ b/roles/cmoa_install/tasks/08-finish.yml @@ -0,0 +1,17 @@ +--- +- name: 0. Check Kubernetes Pods (ALL) + command: "{{ role_path }}/files/k8s_status {{ cmoa_namespace }}" + +- name: 1. IP Setting reset + command: "{{ role_path }}/files/ip_change {{ansible_default_ipv4.address}} {{before_ip}} {{ role_path }}/files" + +- name: 2. CloudMOA Version reset + command: "{{ role_path }}/files/rel_change {{ cmoa_version }} {{ before_version }} {{ role_path }}/files" + +- debug: + msg: + - ======================================================================================= + - "## CloudMOA WEB " + - CloudMOA Jaeger = http://{{ ansible_default_ipv4.address }}:31080 + - CloudMOA JSPD = http://{{ ansible_default_ipv4.address }}:31084 + - ======================================================================================= diff --git a/roles/cmoa_install/tasks/helm-install.yml b/roles/cmoa_install/tasks/helm-install.yml new file mode 100644 index 0000000..d057455 --- /dev/null +++ b/roles/cmoa_install/tasks/helm-install.yml @@ -0,0 +1,60 @@ +--- +- name: Create Helm temporary directory + file: + path: /tmp/helm + state: directory + mode: "0755" + +- name: Fetch Helm package + get_url: + url: 'https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz' + dest: /tmp/helm.tar.gz + checksum: '{{ helm_checksum }}' + +- name: Extract Helm package + unarchive: + remote_src: true + src: /tmp/helm.tar.gz + dest: /tmp/helm + +- name: Ensure "docker" group exists + group: + name: docker + state: present + become: true + +- name: Install helm to /usr/local/bin + copy: + remote_src: true + src: 
/tmp/helm/linux-amd64/helm + dest: /usr/local/bin/helm + owner: root + group: docker + mode: "0755" + become: true + +- name: Cleanup Helm temporary directory + file: + path: /tmp/helm + state: absent + +- name: Cleanup Helm temporary download + file: + path: /tmp/helm.tar.gz + state: absent + +- name: Ensure bash_completion.d directory exists + file: + path: /etc/bash_completion.d + state: directory + mode: "0755" + become: true + +- name: Setup Helm tab-completion + shell: | + set -o pipefail + /usr/local/bin/helm completion bash | tee /etc/bash_completion.d/helm + args: + executable: /bin/bash + changed_when: false + become: true diff --git a/roles/cmoa_install/tasks/main.yml b/roles/cmoa_install/tasks/main.yml new file mode 100644 index 0000000..7239fa3 --- /dev/null +++ b/roles/cmoa_install/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- include: helm-install.yml + tags: helm-install + +- include: 00-default-settings-master.yml + tags: default_setting + when: kubernetes_role == 'master' + +- include: 00-default-settings-node.yml + tags: default_setting_node + when: kubernetes_role == 'node' + +- include: 01-storage-install.yml + tags: storage-install + when: kubernetes_role == 'master' + +- include: 02-base-install.yml + tags: base-install + when: kubernetes_role == 'master' + +- include: 03-ddl-dml.yml + tags: ddl-dml + when: kubernetes_role == 'master' + +- include: 04-keycloak-install.yml + tags: keycloak-install + when: kubernetes_role == 'master' + +- include: 05-imxc-install.yml + tags: imxc-install + when: kubernetes_role == 'master' + +- include: 06-imxc-ui-install.yml + tags: imxc-ui-install + when: kubernetes_role == 'master' + +- include: 07-keycloak-setting.yml + tags: keycloak-setting + when: kubernetes_role == 'master' + +- include: 08-finish.yml + tags: finish + when: kubernetes_role == 'master' \ No newline at end of file diff --git a/roles/cmoa_install/templates/realm.json.j2 b/roles/cmoa_install/templates/realm.json.j2 new file mode 100644 index 
0000000..1323ce2 --- /dev/null +++ b/roles/cmoa_install/templates/realm.json.j2 @@ -0,0 +1,7 @@ +{ + "id": "{{ keycloak_realm }}", + "realm": "{{ keycloak_realm }}", + "enabled": true, + "eventsEnabled": true, + "eventsExpiration": 7200 +} diff --git a/roles/cmoa_install/vars/main.yml b/roles/cmoa_install/vars/main.yml new file mode 100644 index 0000000..14c8e95 --- /dev/null +++ b/roles/cmoa_install/vars/main.yml @@ -0,0 +1,7 @@ +--- +# name of the realm to create, this is a required variable +keycloak_realm: Exem + +# other settings +keycloak_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_http_port }}" +keycloak_management_url: "http://{{ ansible_default_ipv4.address }}:{{ keycloak_management_http_port }}" diff --git a/roles/cmoa_os_setting/README.md b/roles/cmoa_os_setting/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/roles/cmoa_os_setting/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/roles/cmoa_os_setting/defaults/main.yml b/roles/cmoa_os_setting/defaults/main.yml new file mode 100644 index 0000000..55b8a06 --- /dev/null +++ b/roles/cmoa_os_setting/defaults/main.yml @@ -0,0 +1,140 @@ +helm_checksum: sha256:72f1c0fcfb17b41b89087e9232e50f20c606e44a0edc2bb9737e05d1c75b8c4f +helm_version: v3.10.2 + +kubernetes_version: 1.25.2 + +kubernetes_kubelet_extra_args: "" +kubernetes_kubeadm_init_extra_opts: "" +kubernetes_join_command_extra_opts: "" + +kubernetes_pod_network: + cni: 'calico' + cidr: '10.96.0.0/12' + +kubernetes_calico_manifest_file: https://docs.projectcalico.org/manifests/calico.yaml + +kubernetes_metric_server_file: https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + +containerd_config: + version: 2 + root: /var/lib/containerd + state: /run/containerd + plugin_dir: "" + disabled_plugins: [] + required_plugins: [] + oom_score: 0 + grpc: + address: /run/containerd/containerd.sock + tcp_address: "" + tcp_tls_cert: "" + tcp_tls_key: "" + uid: 0 + gid: 0 + max_recv_message_size: 16777216 + max_send_message_size: 16777216 + ttrpc: + address: "" + uid: 0 + gid: 0 + debug: + address: "" + uid: 0 + gid: 0 + level: "" + metrics: + address: "" + grpc_histogram: false + cgroup: + path: "" + timeouts: + "io.containerd.timeout.shim.cleanup": 5s + "io.containerd.timeout.shim.load": 5s + "io.containerd.timeout.shim.shutdown": 3s + "io.containerd.timeout.task.state": 2s + plugins: + "io.containerd.gc.v1.scheduler": + pause_threshold: 0.02 + deletion_threshold: 0 + mutation_threshold: 100 + 
schedule_delay: 0s + startup_delay: 100ms + "io.containerd.grpc.v1.cri": + disable_tcp_service: true + stream_server_address: 127.0.0.1 + stream_server_port: "0" + stream_idle_timeout: 4h0m0s + enable_selinux: false + sandbox_image: k8s.gcr.io/pause:3.1 + stats_collect_period: 10 + systemd_cgroup: false + enable_tls_streaming: false + max_container_log_line_size: 16384 + disable_cgroup: false + disable_apparmor: false + restrict_oom_score_adj: false + max_concurrent_downloads: 3 + disable_proc_mount: false + containerd: + snapshotter: overlayfs + default_runtime_name: runc + no_pivot: false + default_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + untrusted_workload_runtime: + runtime_type: "" + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + runtimes: + runc: + runtime_type: io.containerd.runc.v1 + runtime_engine: "" + runtime_root: "" + privileged_without_host_devices: false + cni: + bin_dir: /opt/cni/bin + conf_dir: /etc/cni/net.d + max_conf_num: 1 + conf_template: "" + registry: + configs: + "10.10.31.243:5000": + tls: + insecure_skip_verify: true + mirrors: + "docker.io": + endpoint: + - https://registry-1.docker.io + "10.10.31.243:5000": + endpoint: + - http://10.10.31.243:5000 + x509_key_pair_streaming: + tls_cert_file: "" + tls_key_file: "" + "io.containerd.internal.v1.opt": + path: /opt/containerd + "io.containerd.internal.v1.restart": + interval: 10s + "io.containerd.metadata.v1.bolt": + content_sharing_policy: shared + "io.containerd.monitor.v1.cgroups": + no_prometheus: false + "io.containerd.runtime.v1.linux": + shim: containerd-shim + runtime: runc + runtime_root: "" + no_shim: false + shim_debug: false + "io.containerd.runtime.v2.task": + platforms: + - linux/amd64 + "io.containerd.service.v1.diff-service": + default: + - walking + "io.containerd.snapshotter.v1.devmapper": + root_path: "" + pool_name: "" + base_image_size: "" diff --git 
a/roles/cmoa_os_setting/files/ingress-nginx/.helmignore b/roles/cmoa_os_setting/files/ingress-nginx/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/roles/cmoa_os_setting/files/ingress-nginx/CHANGELOG.md b/roles/cmoa_os_setting/files/ingress-nginx/CHANGELOG.md new file mode 100644 index 0000000..27a52e8 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/CHANGELOG.md @@ -0,0 +1,445 @@ +# Changelog + +This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org). 
+ +### 4.2.1 + +- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1 +- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + +### 4.2.0 + +- Support for Kubernetes v1.19.0 was removed +- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0" +- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name" +- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1" +- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16" +- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process" +- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix" +- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check" +- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16" +- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) Bump github.com/stretchr/testify from 1.7.5 to 1.8.0" +- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process" +- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable" +- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15" +- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2" +- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format" +- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API" +- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide" +- 
"[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs" +- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14" +- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0" +- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5" +- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement" +- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver" +- "[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step" +- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha" +- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path" +- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases" +- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds" +- "[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps" +- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)" +- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard" +- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0" +- "[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2" +- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos" +- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it" +- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 
2.3.1 to 3.1.0" +- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0" +- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3" +- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1" + +### 4.1.2 + +- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed" +- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter" +- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart" + +### 4.1.0 + +- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script" +- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6" +- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod" +- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for redirectScheme with use-forward-headers when X-Forwarded-Proto is empty" +- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector" +- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies" +- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md" +- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing" +- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist" +- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one" +- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement" +- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) 
added new auth-tls-match-cn annotation" +- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0" + +### 4.0.18 + +- "[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build" +- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build" +- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge" +- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241" +- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts" +- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error" +- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation" +- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric" +- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code." 
+- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image" +- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests" +- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint" +- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1" +- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0" +- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint" +- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial" +- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera" +- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment" +- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation" +- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell" +- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor" +- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations" +- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0" +- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account" +- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description" +- 
"[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests" +- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values" +- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1" +- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs" +- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits" +- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations" +- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT" + + +### 4.0.15 + +- [8120] https://github.com/kubernetes/ingress-nginx/pull/8120 Update go in runner and release v1.1.1 +- [8119] https://github.com/kubernetes/ingress-nginx/pull/8119 Update to go v1.17.6 +- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs +- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors +- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release +- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparision of P… +- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch +- [8072] https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart +- [8058] https://github.com/kubernetes/ingress-nginx/pull/8058 Bump github.com/spf13/cobra from 1.2.1 to 1.3.0 +- [8054] https://github.com/kubernetes/ingress-nginx/pull/8054 Bump 
google.golang.org/grpc from 1.41.0 to 1.43.0 +- [8051] https://github.com/kubernetes/ingress-nginx/pull/8051 align bug report with feature request regarding kind documentation +- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045) +- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues +- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543 +- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executible name +- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners +- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option +- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags +- [8021] https://github.com/kubernetes/ingress-nginx/pull/8021 Disable default modsecurity_rules_file if modsecurity-snippet is specified +- [8019] https://github.com/kubernetes/ingress-nginx/pull/8019 Revise main documentation page +- [8018] https://github.com/kubernetes/ingress-nginx/pull/8018 Preserve order of plugin invocation +- [8015] https://github.com/kubernetes/ingress-nginx/pull/8015 Add newline indenting to admission webhook annotations +- [8014] https://github.com/kubernetes/ingress-nginx/pull/8014 Add link to example error page manifest in docs +- [8009] https://github.com/kubernetes/ingress-nginx/pull/8009 Fix spelling in documentation and top-level files +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml +- [8003] https://github.com/kubernetes/ingress-nginx/pull/8003 Minor improvements (formatting, consistency) in install guide +- [8001] https://github.com/kubernetes/ingress-nginx/pull/8001 fix: go-grpc Dockerfile +- [7999] https://github.com/kubernetes/ingress-nginx/pull/7999 images: use 
k8s-staging-test-infra/gcb-docker-gcloud +- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement +- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation. +- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs +- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to defaul server +- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog +- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition + +### 4.0.14 + +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 Using helm-docs to populate values table in README.md + +### 4.0.13 + +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml + +### 4.0.12 + +- [7978] https://github.com/kubernetes/ingress-nginx/pull/7979 Support custom annotations in admissions Jobs + +### 4.0.11 + +- [7873] https://github.com/kubernetes/ingress-nginx/pull/7873 Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional. 
+ +### 4.0.10 + +- [7964] https://github.com/kubernetes/ingress-nginx/pull/7964 Update controller version to v1.1.0 + +### 4.0.9 + +- [6992] https://github.com/kubernetes/ingress-nginx/pull/6992 Add ability to specify labels for all resources + +### 4.0.7 + +- [7923] https://github.com/kubernetes/ingress-nginx/pull/7923 Release v1.0.5 of ingress-nginx +- [7806] https://github.com/kubernetes/ingress-nginx/pull/7806 Choice option for internal/external loadbalancer type service + +### 4.0.6 + +- [7804] https://github.com/kubernetes/ingress-nginx/pull/7804 Release v1.0.4 of ingress-nginx +- [7651] https://github.com/kubernetes/ingress-nginx/pull/7651 Support ipFamilyPolicy and ipFamilies fields in Helm Chart +- [7798] https://github.com/kubernetes/ingress-nginx/pull/7798 Exoscale: use HTTP Healthcheck mode +- [7793] https://github.com/kubernetes/ingress-nginx/pull/7793 Update kube-webhook-certgen to v1.1.1 + +### 4.0.5 + +- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx + +### 4.0.3 + +- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx + +### 4.0.2 + +- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx + +### 4.0.1 + +- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx + +### 3.34.0 + +- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates + +### 3.33.0 + +- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1 + +### 3.32.0 + +- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA + +### 3.31.0 + +- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes + +### 3.30.0 + +- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints + +### 3.29.0 + +- [X] 
[#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor + +### 3.28.0 + +- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs + +### 3.27.0 + +- Update ingress-nginx v0.45.0 + +### 3.26.0 + +- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics + +### 3.25.0 + +- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken + +### 3.24.0 + +- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment + +### 3.23.0 + +- Update ingress-nginx v0.44.0 + +### 3.22.0 + +- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file +- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart + +### 3.21.0 + +- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject +- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values +- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled +- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1 + +### 3.20.1 + +- Do not create KEDA in case of DaemonSets. +- Fix KEDA v2 definition + +### 3.20.0 + +- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled. 
+ +### 3.19.0 + +- Update ingress-nginx v0.43.0 + +### 3.18.0 + +- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy +- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters + +### 3.17.0 + +- Update ingress-nginx v0.42.0 + +### 3.16.1 + +- Fix chart-releaser action + +### 3.16.0 + +- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service + +### 3.15.1 + +- Fix chart-releaser action + +### 3.15.0 + +- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml + +### 3.14.0 + +- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend + +### 3.13.0 + +- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable + +### 3.12.0 + +- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs + +### 3.11.1 + +- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling + +### 3.11.0 + +- Support Keda Autoscaling + +### 3.10.1 + +- Fix regression introduced in 0.41.0 with external authentication + +### 3.10.0 + +- Fix routing regression introduced in 0.41.0 with PathType Exact + +### 3.9.0 + +- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling + +### 3.8.0 + +- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image +- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs +- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend +- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix 
controller service annotations +- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog + +### 3.7.1 + +- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart + +### 3.7.0 + +- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes [#6315](https://github.com/kubernetes/ingress-nginx/issues/6315) + +### 3.6.0 + +- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector + +### 3.5.1 + +- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release + +### 3.5.0 + +- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations + +### 3.4.0 + +- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288 + +### 3.3.1 + +- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart +- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link +- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0 + +### 3.3.1 + +- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test + +### 3.3.0 + +- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values +- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort +- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression +- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules + +### 3.0.0 + +- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update 
chart requirements + +### 2.16.0 + +- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller + +### 2.15.0 + +- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec + +### 2.14.0 + +- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 +- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip + +### 2.12.1 + +- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples + +### 2.12.0 + +- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels +- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting + +### 2.11.3 + +- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH + +### 2.11.2 + +- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version + +### 2.11.1 + +- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1 + +### 2.11.0 + +- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0 +- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe + +### 2.10.0 + +- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image + +### 2.9.1 + +- [X] [#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls 
because numeric values need to be presented as strings (#5823) + +### 2.9.0 + +- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues + + +### TODO + +Keep building the changelog using *git log charts* checking the tag diff --git a/roles/cmoa_os_setting/files/ingress-nginx/Chart.yaml b/roles/cmoa_os_setting/files/ingress-nginx/Chart.yaml new file mode 100644 index 0000000..55c0b54 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + artifacthub.io/changes: | + - "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today" + - "fix permissions about configmap" + artifacthub.io/prerelease: "false" +apiVersion: v2 +appVersion: 1.3.1 +description: Ingress controller for Kubernetes using NGINX as a reverse proxy and + load balancer +home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.20.0-0' +maintainers: +- name: rikatz +- name: strongjz +- name: tao12345666333 +name: ingress-nginx +sources: +- https://github.com/kubernetes/ingress-nginx +version: 4.2.5 diff --git a/roles/cmoa_os_setting/files/ingress-nginx/OWNERS b/roles/cmoa_os_setting/files/ingress-nginx/OWNERS new file mode 100644 index 0000000..6b7e049 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md + +approvers: +- ingress-nginx-helm-maintainers + +reviewers: +- ingress-nginx-helm-reviewers + +labels: +- area/helm diff --git a/roles/cmoa_os_setting/files/ingress-nginx/README.md b/roles/cmoa_os_setting/files/ingress-nginx/README.md new file mode 100644 index 0000000..4e6a696 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/README.md @@ -0,0 +1,494 @@ +# 
ingress-nginx + +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +![Version: 4.2.5](https://img.shields.io/badge/Version-4.2.5-informational?style=flat-square) ![AppVersion: 1.3.1](https://img.shields.io/badge/AppVersion-1.3.1-informational?style=flat-square) + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. 
In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). 
To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. 
+ You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. +``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. 
+ +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. + +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. 
This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. +**This feature is enabled by default since 0.31.0.** + +nginx-ingress-controller version 0.25.* works only with Kubernetes 1.14+; version 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. 
+ +## Requirements + +Kubernetes: `>=1.20.0-0` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| commonLabels | object | `{}` | | +| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers | +| controller.admissionWebhooks.annotations | object | `{}` | | +| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | | +| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | | +| controller.admissionWebhooks.enabled | bool | `true` | | +| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use | +| controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | | +| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks | +| controller.admissionWebhooks.namespaceSelector | object | `{}` | | +| controller.admissionWebhooks.networkPolicyEnabled | bool | `false` | | +| controller.admissionWebhooks.objectSelector | object | `{}` | | +| controller.admissionWebhooks.patch.enabled | bool | `true` | | +| controller.admissionWebhooks.patch.image.digest | string | `"sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"` | | +| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | | +| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | | +| controller.admissionWebhooks.patch.image.tag | string | `"v1.3.0"` | | +| 
controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources | +| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | | +| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # | +| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | | +| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | | +| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | | +| controller.admissionWebhooks.patch.tolerations | list | `[]` | | +| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | | +| controller.admissionWebhooks.port | int | `8443` | | +| controller.admissionWebhooks.service.annotations | object | `{}` | | +| controller.admissionWebhooks.service.externalIPs | list | `[]` | | +| controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | | +| controller.admissionWebhooks.service.servicePort | int | `443` | | +| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | | +| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # | +| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. 
Global snippets in ConfigMap are still respected | +| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet # | +| controller.autoscaling.behavior | object | `{}` | | +| controller.autoscaling.enabled | bool | `false` | | +| controller.autoscaling.maxReplicas | int | `11` | | +| controller.autoscaling.minReplicas | int | `1` | | +| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| controller.autoscalingTemplate | list | `[]` | | +| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ | +| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. | +| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) | +| controller.containerName | string | `"controller"` | Configures the controller container name | +| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on | +| controller.customTemplate.configMapKey | string | `""` | | +| controller.customTemplate.configMapName | string | `""` | | +| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. | +| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
| +| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update | +| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # | +| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use | +| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. | +| controller.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. | +| controller.extraModules | list | `[]` | | +| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. | +| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. | +| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. | +| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. | +| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. 
Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged | +| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not | +| controller.hostPort.ports.http | int | `80` | 'hostPort' http port | +| controller.hostPort.ports.https | int | `443` | 'hostPort' https port | +| controller.hostname | object | `{}` | Optionally customize the pod hostname. | +| controller.image.allowPrivilegeEscalation | bool | `true` | | +| controller.image.chroot | bool | `false` | | +| controller.image.digest | string | `"sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"` | | +| controller.image.digestChroot | string | `"sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1"` | | +| controller.image.image | string | `"ingress-nginx/controller"` | | +| controller.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.image.registry | string | `"registry.k8s.io"` | | +| controller.image.runAsUser | int | `101` | | +| controller.image.tag | string | `"v1.3.1"` | | +| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation | +| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). 
| +| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass | +| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster | +| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not | +| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass | +| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. | +| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | | +| controller.keda.behavior | object | `{}` | | +| controller.keda.cooldownPeriod | int | `300` | | +| controller.keda.enabled | bool | `false` | | +| controller.keda.maxReplicas | int | `11` | | +| controller.keda.minReplicas | int | `1` | | +| controller.keda.pollingInterval | int | `30` | | +| controller.keda.restoreToOriginalReplicaCount | bool | `false` | | +| controller.keda.scaledObject.annotations | object | `{}` | | +| controller.keda.triggers | list | `[]` | | +| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` | +| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels # | +| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. 
To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. # | +| controller.livenessProbe.failureThreshold | int | `5` | | +| controller.livenessProbe.httpGet.path | string | `"/healthz"` | | +| controller.livenessProbe.httpGet.port | int | `10254` | | +| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.livenessProbe.initialDelaySeconds | int | `10` | | +| controller.livenessProbe.periodSeconds | int | `10` | | +| controller.livenessProbe.successThreshold | int | `1` | | +| controller.livenessProbe.timeoutSeconds | int | `1` | | +| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases | +| controller.metrics.enabled | bool | `false` | | +| controller.metrics.port | int | `10254` | | +| controller.metrics.prometheusRule.additionalLabels | object | `{}` | | +| controller.metrics.prometheusRule.enabled | bool | `false` | | +| controller.metrics.prometheusRule.rules | list | `[]` | | +| controller.metrics.service.annotations | object | `{}` | | +| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | | +| controller.metrics.service.servicePort | int | `10254` | | +| controller.metrics.service.type | string | `"ClusterIP"` | | +| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | | +| controller.metrics.serviceMonitor.enabled | bool | `false` | | +| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | | +| controller.metrics.serviceMonitor.namespace | string | `""` | | +| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | | +| 
controller.metrics.serviceMonitor.relabelings | list | `[]` | | +| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | | +| controller.metrics.serviceMonitor.targetLabels | list | `[]` | | +| controller.minAvailable | int | `1` | | +| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # | +| controller.name | string | `"controller"` | | +| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods # | +| controller.podLabels | object | `{}` | Labels to add to the pod container metadata | +| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods | +| controller.priorityClassName | string | `""` | | +| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers | +| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running. 
| +| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not | +| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to Must be / | +| controller.readinessProbe.failureThreshold | int | `3` | | +| controller.readinessProbe.httpGet.path | string | `"/healthz"` | | +| controller.readinessProbe.httpGet.port | int | `10254` | | +| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.readinessProbe.initialDelaySeconds | int | `10` | | +| controller.readinessProbe.periodSeconds | int | `10` | | +| controller.readinessProbe.successThreshold | int | `1` | | +| controller.readinessProbe.timeoutSeconds | int | `1` | | +| controller.replicaCount | int | `1` | | +| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply | +| controller.resources.requests.cpu | string | `"100m"` | | +| controller.resources.requests.memory | string | `"90Mi"` | | +| controller.scope.enabled | bool | `false` | Enable 'scope' or not | +| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) | +| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. | +| controller.service.annotations | object | `{}` | | +| controller.service.appProtocol | bool | `true` | If enabled is adding an appProtocol option for Kubernetes service. 
An appProtocol field replacing annotations that were using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 # | +| controller.service.enableHttp | bool | `true` | | +| controller.service.enableHttps | bool | `true` | | +| controller.service.enabled | bool | `true` | | +| controller.service.external.enabled | bool | `true` | | +| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. | +| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). | +| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. | +| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. 
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ | +| controller.service.labels | object | `{}` | | +| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer | +| controller.service.loadBalancerSourceRanges | list | `[]` | | +| controller.service.nodePorts.http | string | `""` | | +| controller.service.nodePorts.https | string | `""` | | +| controller.service.nodePorts.tcp | object | `{}` | | +| controller.service.nodePorts.udp | object | `{}` | | +| controller.service.ports.http | int | `80` | | +| controller.service.ports.https | int | `443` | | +| controller.service.targetPorts.http | string | `"http"` | | +| controller.service.targetPorts.https | string | `"https"` | | +| controller.service.type | string | `"LoadBalancer"` | | +| controller.shareProcessNamespace | bool | `false` | | +| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls | +| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap | +| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready # wait up to five minutes for the drain of connections # | +| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
# Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # | +| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap | +| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # | +| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false | +| defaultBackend.affinity | object | `{}` | | +| defaultBackend.autoscaling.annotations | object | `{}` | | +| defaultBackend.autoscaling.enabled | bool | `false` | | +| defaultBackend.autoscaling.maxReplicas | int | `2` | | +| defaultBackend.autoscaling.minReplicas | int | `1` | | +| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. 
See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.enabled | bool | `false` | | +| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| defaultBackend.extraArgs | object | `{}` | | +| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods | +| defaultBackend.extraVolumeMounts | list | `[]` | | +| defaultBackend.extraVolumes | list | `[]` | | +| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | | +| defaultBackend.image.image | string | `"defaultbackend-amd64"` | | +| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | | +| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | | +| defaultBackend.image.registry | string | `"registry.k8s.io"` | | +| defaultBackend.image.runAsNonRoot | bool | `true` | | +| defaultBackend.image.runAsUser | int | `65534` | | +| defaultBackend.image.tag | string | `"1.5"` | | +| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources | +| defaultBackend.livenessProbe.failureThreshold | int | `3` | | +| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | | +| defaultBackend.livenessProbe.periodSeconds | int | `10` | | +| defaultBackend.livenessProbe.successThreshold | int | `1` | | +| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.minAvailable | int | `1` | | +| defaultBackend.name | string | `"defaultbackend"` | | +| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # | +| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # | +| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata | +| defaultBackend.podSecurityContext | object | `{}` | Security 
Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # | +| defaultBackend.port | int | `8080` | | +| defaultBackend.priorityClassName | string | `""` | | +| defaultBackend.readinessProbe.failureThreshold | int | `6` | | +| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | | +| defaultBackend.readinessProbe.periodSeconds | int | `5` | | +| defaultBackend.readinessProbe.successThreshold | int | `1` | | +| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.replicaCount | int | `1` | | +| defaultBackend.resources | object | `{}` | | +| defaultBackend.service.annotations | object | `{}` | | +| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # | +| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | | +| defaultBackend.service.servicePort | int | `80` | | +| defaultBackend.service.type | string | `"ClusterIP"` | | +| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | | +| defaultBackend.serviceAccount.create | bool | `true` | | +| defaultBackend.serviceAccount.name | string | `""` | | +| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # | +| dhParam | string | `nil` | A base64-encoded Diffie-Hellman parameter. 
This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param | +| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| podSecurityPolicy.enabled | bool | `false` | | +| portNamePrefix | string | `""` | Prefix for TCP and UDP ports names in ingress controller service # Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration | +| rbac.create | bool | `true` | | +| rbac.scope | bool | `false` | | +| revisionHistoryLimit | int | `10` | Rollback limit # | +| serviceAccount.annotations | object | `{}` | Annotations for the controller service account | +| serviceAccount.automountServiceAccountToken | bool | `true` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| tcp | object | `{}` | TCP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | +| udp | object | `{}` | UDP service key-value pairs # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md # | + diff --git a/roles/cmoa_os_setting/files/ingress-nginx/README.md.gotmpl b/roles/cmoa_os_setting/files/ingress-nginx/README.md.gotmpl new file mode 100644 index 0000000..8959961 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/README.md.gotmpl @@ -0,0 +1,235 @@ +{{ template "chart.header" . }} +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . 
}} + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. 
For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. 
+ +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
+``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. 
+ +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. 
+**This feature is enabled by default since 0.31.0.** + +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. + +{{ template "chart.requirementsSection" . }} + +{{ template "chart.valuesSection" . }} + +{{ template "helm-docs.versionFooter" . 
}} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml new file mode 100644 index 0000000..b28a232 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/controller-custom-ingressclass-flags.yaml @@ -0,0 +1,7 @@ +controller: + watchIngressWithoutClass: true + ingressClassResource: + name: custom-nginx + enabled: true + default: true + controllerValue: "k8s.io/custom-nginx" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml new file mode 100644 index 0000000..4393a5b --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,14 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + kind: DaemonSet + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP + + config: + use-proxy-protocol: "true" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 0000000..1d94be2 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,22 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml new file mode 100644 index 0000000..f299dbf --- /dev/null +++ 
b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml new file mode 100644 index 0000000..ab7d47b --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-headers-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml new file mode 100644 index 0000000..0a200a7 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-internal-lb-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml new file mode 100644 index 0000000..3b7aa2f --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort 
diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml new file mode 100644 index 0000000..0b55306 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-podannotations-values.yaml @@ -0,0 +1,17 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..acd86a7 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,20 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..90b0f57 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,18 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: 
"default/test:8080" + +portNamePrefix: "port" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml new file mode 100644 index 0000000..25ee64d --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,16 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml new file mode 100644 index 0000000..380c8b4 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/daemonset-tcp-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml new file mode 100644 index 0000000..82fa23e --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-default-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml new file mode 100644 index 0000000..cb3cb54 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-metrics-values.yaml @@ -0,0 +1,12 
@@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml new file mode 100644 index 0000000..8026a63 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml new file mode 100644 index 0000000..fccdb13 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml new file mode 100644 index 0000000..54d364d --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deamonset-webhook-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml 
b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml new file mode 100644 index 0000000..dca3f35 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-behavior-values.yaml @@ -0,0 +1,14 @@ +controller: + autoscaling: + enabled: true + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 180 + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml new file mode 100644 index 0000000..b8b3ac6 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + autoscaling: + enabled: true + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml new file mode 100644 index 0000000..1749418 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customconfig-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + config: + use-proxy-protocol: "true" + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml new file mode 100644 index 0000000..a564eaf --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,20 @@ +controller: + image: + repository: 
ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml new file mode 100644 index 0000000..9f46b4e --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-default-values.yaml @@ -0,0 +1,8 @@ +# Left blank to test default values +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml new file mode 100644 index 0000000..ec59235 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-extra-modules.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml new file mode 100644 index 0000000..17a11ac --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-headers-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml new 
file mode 100644 index 0000000..fd8df8d --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-internal-lb-values.yaml @@ -0,0 +1,13 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml new file mode 100644 index 0000000..9209ad5 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-metrics-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml new file mode 100644 index 0000000..cd9b323 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-nodeport-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml new file mode 100644 index 0000000..b48d93c --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-podannotations-values.yaml @@ -0,0 +1,16 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + 
prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml new file mode 100644 index 0000000..2f332a7 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-psp-values.yaml @@ -0,0 +1,10 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..c51a4e9 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,19 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..56323c5 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,17 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git 
a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml new file mode 100644 index 0000000..5b45b69 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,15 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml new file mode 100644 index 0000000..ac0b6e6 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-tcp-values.yaml @@ -0,0 +1,11 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml new file mode 100644 index 0000000..6195bb3 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,12 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml new file mode 100644 index 0000000..95487b0 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-extraEnvs-values.yaml @@ -0,0 +1,12 @@ +controller: + service: + type: ClusterIP 
+ admissionWebhooks: + enabled: true + extraEnvs: + - name: FOO + value: foo + - name: TEST + value: test + patch: + enabled: true diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml new file mode 100644 index 0000000..49ebbb0 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-resources-values.yaml @@ -0,0 +1,23 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + createSecretJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patchWebhookJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patch: + enabled: true diff --git a/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml new file mode 100644 index 0000000..76669a5 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/ci/deployment-webhook-values.yaml @@ -0,0 +1,9 @@ +controller: + image: + repository: ingress-controller/controller + tag: 1.0.0-dev + digest: null + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/roles/cmoa_os_setting/files/ingress-nginx/override-values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/override-values.yaml new file mode 100644 index 0000000..e190f03 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/override-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + + service: + type: LoadBalancer + nodePorts: + http: "30000" + https: "30001" + tcp: {} + udp: {} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/temp.yaml b/roles/cmoa_os_setting/files/ingress-nginx/temp.yaml new file mode 100644 index 0000000..2b28787 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/temp.yaml @@ -0,0 +1,724 @@ +--- +# Source: 
ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - 
networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have 
used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- 
+# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + replicas: 1 + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - 
/wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: 
ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: 
ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + 
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + 
app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/roles/cmoa_os_setting/files/ingress-nginx/temp2.yaml b/roles/cmoa_os_setting/files/ingress-nginx/temp2.yaml new file mode 100644 index 0000000..9ef52fc --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/temp2.yaml @@ -0,0 +1,725 @@ +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + 
app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +data: + allow-snippet-annotations: "true" +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + name: release-name-ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx + namespace: "default" +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller-admission + namespace: default +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + type: LoadBalancer + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + nodePort: 30000 + - name: 
https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + nodePort: 30001 + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: release-name-ingress-nginx-controller + namespace: default +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974" + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + 
fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: release-name-ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: release-name-ingress-nginx-admission +--- +# Source: ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration 
+metadata: + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: release-name-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: "default" + name: release-name-ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + 
app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: release-name-ingress-nginx-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + 
name: release-name-ingress-nginx-admission + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: release-name-ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: release-name-ingress-nginx-admission + namespace: "default" +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-create + namespace: default + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - create + - 
--host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=release-name-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-ingress-nginx-admission-patch + namespace: default + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: release-name-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.2.5 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "1.3.1" + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47" + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=release-name-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=release-name-ingress-nginx-admission + - --patch-failure-policy=Fail + 
env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: release-name-ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/NOTES.txt b/roles/cmoa_os_setting/files/ingress-nginx/templates/NOTES.txt new file mode 100644 index 0000000..8985c56 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/NOTES.txt @@ -0,0 +1,80 @@ +The ingress-nginx controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." + echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." +{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. 
+You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." +{{- end }} + +An example Ingress that makes use of the controller: + +{{- $isV1 := semverCompare ">=1" .Chart.AppVersion}} + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + name: example + namespace: foo + {{- if eq $isV1 false }} + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + {{- end }} + spec: + {{- if $isV1 }} + ingressClassName: {{ .Values.controller.ingressClassResource.name }} + {{- end }} + rules: + - host: www.example.com + http: + paths: + - pathType: Prefix + backend: + service: + name: exampleService + port: + number: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. 
##### +################################################################################# +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/_helpers.tpl b/roles/cmoa_os_setting/files/ingress-nginx/templates/_helpers.tpl new file mode 100644 index 0000000..e69de0c --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/_helpers.tpl @@ -0,0 +1,185 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ingress-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ingress-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Container SecurityContext. 
+*/}} +{{- define "controller.containerSecurityContext" -}} +{{- if .Values.controller.containerSecurityContext -}} +{{- toYaml .Values.controller.containerSecurityContext -}} +{{- else -}} +capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +runAsUser: {{ .Values.controller.image.runAsUser }} +allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} +{{- end }} +{{- end -}} + +{{/* +Get specific image +*/}} +{{- define "ingress-nginx.image" -}} +{{- if .chroot -}} +{{- printf "%s-chroot" .image -}} +{{- else -}} +{{- printf "%s" .image -}} +{{- end }} +{{- end -}} + +{{/* +Get specific image digest +*/}} +{{- define "ingress-nginx.imageDigest" -}} +{{- if .chroot -}} +{{- if .digestChroot -}} +{{- printf "@%s" .digestChroot -}} +{{- end }} +{{- else -}} +{{ if .digest -}} +{{- printf "@%s" .digest -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.controller.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "ingress-nginx.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "ingress-nginx.labels" -}} +helm.sh/chart: {{ include "ingress-nginx.chart" . }} +{{ include "ingress-nginx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/part-of: {{ template "ingress-nginx.name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "ingress-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "ingress-nginx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "ingress-nginx.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. 
+*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Check the ingress controller version tag is at most three versions behind the last release +*/}} +{{- define "isControllerTagValid" -}} +{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}} +{{- fail "Controller container image tag should be 0.27.0 or higher" -}} +{{- end -}} +{{- end -}} + +{{/* +IngressClass parameters. +*/}} +{{- define "ingressClass.parameters" -}} + {{- if .Values.controller.ingressClassResource.parameters -}} + parameters: +{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}} + {{ end }} +{{- end -}} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/_params.tpl b/roles/cmoa_os_setting/files/ingress-nginx/templates/_params.tpl new file mode 100644 index 0000000..305ce0d --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/_params.tpl @@ -0,0 +1,62 @@ +{{- define "ingress-nginx.params" -}} +- /nginx-ingress-controller +{{- if .Values.defaultBackend.enabled }} +- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }} +{{- end }} +{{- if and .Values.controller.publishService.enabled .Values.controller.service.enabled }} +{{- if .Values.controller.service.external.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} +{{- else if .Values.controller.service.internal.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . 
}}-internal +{{- end }} +{{- end }} +- --election-id={{ .Values.controller.electionID }} +- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }} +{{- if .Values.controller.ingressClass }} +- --ingress-class={{ .Values.controller.ingressClass }} +{{- end }} +- --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.tcp }} +- --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp +{{- end }} +{{- if .Values.udp }} +- --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-udp +{{- end }} +{{- if .Values.controller.scope.enabled }} +- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }} +{{- end }} +{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }} +- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }} +{{- end }} +{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} +- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} +- --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} +- --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }} +- --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }} +{{- end }} +{{- if .Values.controller.maxmindLicenseKey }} +- --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} +{{- end }} +{{- if .Values.controller.healthCheckHost }} +- --healthz-host={{ .Values.controller.healthCheckHost }} +{{- end }} +{{- if not (eq .Values.controller.healthCheckPath "/healthz") }} +- --health-check-path={{ .Values.controller.healthCheckPath }} 
+{{- end }} +{{- if .Values.controller.ingressClassByName }} +- --ingress-class-by-name=true +{{- end }} +{{- if .Values.controller.watchIngressWithoutClass }} +- --watch-ingress-without-class=true +{{- end }} +{{- range $key, $value := .Values.controller.extraArgs }} +{{- /* Accept keys without values or with false as value */}} +{{- if eq ($value | quote | len) 2 }} +- --{{ $key }} +{{- else }} +- --{{ $key }}={{ $value }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..5659a1f --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + {{- with .Values.controller.admissionWebhooks.existingPsp }} + - {{ . }} + {{- else }} + - {{ include "ingress-nginx.fullname" . 
}}-admission + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..abf17fb --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..7558e0b --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission-create + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: create + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . 
}}-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.createSecretJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..0528215 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,81 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission-patch + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: patch + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - patch + - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name={{ include "ingress-nginx.fullname" . 
}}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.admissionWebhooks.extraEnvs }} + {{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.securityContext }} + securityContext: + {{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..70edde3 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..795bac6 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..698c5c8 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..eae4751 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..8caffcb --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,48 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + {{- if .Values.controller.admissionWebhooks.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }} + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: {{ .Release.Namespace | quote }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + path: /networking/v1/ingresses + {{- if .Values.controller.admissionWebhooks.timeoutSeconds }} + timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.objectSelector }} + objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrole.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrole.yaml new file mode 100644 index 0000000..0e725ec --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrole.yaml @@ -0,0 +1,94 @@ +{{- if .Values.rbac.create }} + +{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}} + {{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }} +{{- end }} + +{{- if not .Values.rbac.scope -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets +{{- if not .Values.controller.scope.enabled }} + - namespaces +{{- end}} + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +{{- end }} + +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..acbbd8b --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml new file mode 100644 index 0000000..dfd49a1 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-addheaders.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.addHeaders -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers + namespace: {{ .Release.Namespace }} +data: {{ toYaml .Values.controller.addHeaders | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml new file mode 100644 index 0000000..f8d15fa --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-proxyheaders.yaml @@ -0,0 +1,19 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . 
}}-custom-proxy-headers + namespace: {{ .Release.Namespace }} +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml new file mode 100644 index 0000000..0f6088e --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-tcp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.tcp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.tcp.annotations }} + annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-tcp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml new file mode 100644 index 0000000..3772ec5 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap-udp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.udp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- if .Values.controller.udp.annotations }} + annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-udp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.udp) . | nindent 2 }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap.yaml new file mode 100644 index 0000000..f28b26e --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-configmap.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.configAnnotations }} + annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}" +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers +{{- end }} +{{- if .Values.dhParam }} + ssl-dh-param: {{ printf "%s/%s" .Release.Namespace (include "ingress-nginx.controller.fullname" .) 
}} +{{- end }} +{{- range $key, $value := .Values.controller.config }} + {{- $key | nindent 2 }}: {{ $value | quote }} +{{- end }} + diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml new file mode 100644 index 0000000..80c268f --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-daemonset.yaml @@ -0,0 +1,223 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts 
.Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + + + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .Name }} + image: {{ .Image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + 
{{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-deployment.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-deployment.yaml new file mode 100644 index 0000000..5ad1867 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-deployment.yaml @@ -0,0 +1,228 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + strategy: + {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName | quote }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: http-metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts 
.Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .name }} + image: {{ .image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + volumeMounts: + - name: modules + mountPath: /modules_mount + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml 
.Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-hpa.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-hpa.yaml new file mode 100644 index 0000000..e0979f1 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-hpa.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +{{- if not .Values.controller.keda.enabled }} + +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + annotations: + {{- with .Values.controller.autoscaling.annotations }} + {{- toYaml . | trimSuffix "\n" | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ingress-nginx.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscalingTemplate }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with .Values.controller.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} + diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml new file mode 100644 index 0000000..9492784 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-ingressclass.yaml @@ -0,0 +1,21 @@ +{{- if .Values.controller.ingressClassResource.enabled -}} +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ .Values.controller.ingressClassResource.name }} +{{- if .Values.controller.ingressClassResource.default }} + annotations: + ingressclass.kubernetes.io/is-default-class: "true" +{{- end }} +spec: + controller: {{ .Values.controller.ingressClassResource.controllerValue }} + {{ template "ingressClass.parameters" . }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-keda.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-keda.yaml new file mode 100644 index 0000000..875157e --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-keda.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +# https://keda.sh/docs/ + +apiVersion: {{ .Values.controller.keda.apiVersion }} +kind: ScaledObject +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + {{- if .Values.controller.keda.scaledObject.annotations }} + annotations: {{ toYaml .Values.controller.keda.scaledObject.annotations | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: +{{- if eq .Values.controller.keda.apiVersion "keda.k8s.io/v1alpha1" }} + deploymentName: {{ include "ingress-nginx.controller.fullname" . }} +{{- else if eq .Values.controller.keda.apiVersion "keda.sh/v1alpha1" }} + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- end }} + pollingInterval: {{ .Values.controller.keda.pollingInterval }} + cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }} + minReplicaCount: {{ .Values.controller.keda.minReplicas }} + maxReplicaCount: {{ .Values.controller.keda.maxReplicas }} + triggers: +{{- with .Values.controller.keda.triggers }} +{{ toYaml . 
| indent 2 }} +{{ end }} + advanced: + restoreToOriginalReplicaCount: {{ .Values.controller.keda.restoreToOriginalReplicaCount }} +{{- if .Values.controller.keda.behavior }} + horizontalPodAutoscalerConfig: + behavior: +{{ with .Values.controller.keda.behavior -}} +{{ toYaml . | indent 8 }} +{{ end }} + +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..8dfbe98 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (and (not .Values.controller.autoscaling.enabled) (gt (.Values.controller.replicaCount | int) 1)) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..78b5362 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-prometheusrules.yaml @@ -0,0 +1,21 @@ +{{- if and ( .Values.controller.metrics.enabled ) ( .Values.controller.metrics.prometheusRule.enabled ) ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "ingress-nginx.name" . 
}} + rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-psp.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-psp.yaml new file mode 100644 index 0000000..2e0499c --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-psp.yaml @@ -0,0 +1,94 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +{{- if .Values.controller.sysctls }} + allowedUnsafeSysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - {{ $sysctl }} + {{- end }} +{{- end }} + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + #- 'projected' + - 'secret' + #- 'downwardAPI' +{{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- end }} +{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.hostPort.enabled }} +{{- range $key, $value := .Values.controller.hostPort.ports }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-role.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-role.yaml new file mode 100644 index 0000000..330be8c --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-role.yaml @@ -0,0 +1,113 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + # TODO(Jintao Zhang) + # Once we release a new version of the controller, + # we will be able to remove the configmap related permissions + # We have used the Lease API for selection + # ref: https://github.com/kubernetes/ingress-nginx/pull/8921 + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - 
coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.controller.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}] + {{- end }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..e846a11 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml new file mode 100644 index 0000000..aae3e15 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-internal.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.internal.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-internal + namespace: {{ .Release.Namespace }} +spec: + type: "{{ .Values.controller.service.type }}" +{{- if .Values.controller.service.internal.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.internal.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.internal.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.internal.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.internal.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if 
$.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml new file mode 100644 index 0000000..1c1d5bd --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-metrics.yaml @@ -0,0 +1,45 @@ +{{- if .Values.controller.metrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.service.labels }} + {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-metrics + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.metrics.service.type }} +{{- if .Values.controller.metrics.service.clusterIP }} + clusterIP: {{ .Values.controller.metrics.service.clusterIP }} +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }} +{{- end }} + ports: + - name: http-metrics + port: {{ .Values.controller.metrics.service.servicePort }} + protocol: TCP + targetPort: http-metrics + {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }} + {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }} + nodePort: {{ .Values.controller.metrics.service.nodePort }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml new file mode 100644 index 0000000..2aae24f --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service-webhook.yaml @@ -0,0 +1,40 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.admissionWebhooks.service.type }} +{{- if .Values.controller.admissionWebhooks.service.clusterIP }} + clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" 
. | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service.yaml new file mode 100644 index 0000000..2b28196 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-service.yaml @@ -0,0 +1,101 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.external.enabled -}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.service.type }} +{{- if .Values.controller.service.clusterIP }} + clusterIP: {{ .Values.controller.service.clusterIP }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} +{{- end }} +{{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ 
.Values.controller.service.healthCheckNodePort }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ .Values.controller.service.ipFamilyPolicy }} +{{- end }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilies }} + ipFamilies: {{ toYaml .Values.controller.service.ipFamilies | nindent 4 }} +{{- end }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if 
$.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..824b2a1 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} + {{- if .Values.serviceAccount.annotations }} + annotations: + {{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..973d36b --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.relabelings }} + relabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} +{{- if .Values.controller.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.controller.metrics.serviceMonitor.jobLabel | quote }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }} +{{- else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- range .Values.controller.metrics.serviceMonitor.targetLabels }} + - {{ . }} + {{- end }} +{{- end }} + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml new file mode 100644 index 0000000..f74c2fb --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/controller-wehbooks-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +{{- if .Values.controller.admissionWebhooks.networkPolicyEnabled }} + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-webhooks-allow + namespace: {{ .Release.Namespace }} +spec: + ingress: + - {} + podSelector: + matchLabels: + app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} + policyTypes: + - Ingress + +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..fd3e96e --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-deployment.yaml @@ -0,0 +1,118 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend +{{- if not .Values.defaultBackend.autoscaling.enabled }} + replicas: {{ .Values.defaultBackend.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podLabels }} + {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: {{ .Values.defaultBackend.priorityClassName }} + {{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: {{ template "ingress-nginx.name" . 
}}-default-backend + {{- with .Values.defaultBackend.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + {{- if .Values.defaultBackend.extraArgs }} + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + runAsNonRoot: {{ .Values.defaultBackend.image.runAsNonRoot }} + allowPrivilegeEscalation: {{ .Values.defaultBackend.image.allowPrivilegeEscalation }} + readOnlyRootFilesystem: {{ .Values.defaultBackend.image.readOnlyRootFilesystem}} + {{- if .Values.defaultBackend.extraEnvs }} + env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ 
.Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + {{- if .Values.defaultBackend.extraVolumeMounts }} + volumeMounts: {{- toYaml .Values.defaultBackend.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.resources }} + resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: {{ toYaml .Values.defaultBackend.nodeSelector | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + {{- if .Values.defaultBackend.tolerations }} + tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.defaultBackend.extraVolumes }} + volumes: {{ toYaml .Values.defaultBackend.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml new file mode 100644 index 0000000..594d265 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-hpa.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }} + metrics: +{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..00891ce --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- if .Values.defaultBackend.enabled -}} +{{- if or (gt (.Values.defaultBackend.replicaCount | int) 1) (gt (.Values.defaultBackend.autoscaling.minReplicas | int) 1) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml new file mode 100644 index 0000000..c144c8f --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-psp.yaml @@ -0,0 +1,38 @@ +{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }} +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-backend + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-role.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-role.yaml new file mode 100644 index 0000000..a2b457c --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-role.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.defaultBackend.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}-backend] + {{- end }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..dbaa516 --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-service.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-service.yaml new file mode 100644 index 0000000..5f1d09a --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.defaultBackend.service.type }} +{{- if .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ .Values.defaultBackend.service.clusterIP }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: default-backend +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..b45a95a --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml b/roles/cmoa_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml new file mode 100644 index 0000000..12e7a4f --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/templates/dh-param-secret.yaml @@ -0,0 +1,10 @@ +{{- with .Values.dhParam -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "ingress-nginx.controller.fullname" $ }} + labels: + {{- include "ingress-nginx.labels" $ | nindent 4 }} +data: + dhparam.pem: {{ . 
}} +{{- end }} diff --git a/roles/cmoa_os_setting/files/ingress-nginx/values.yaml b/roles/cmoa_os_setting/files/ingress-nginx/values.yaml new file mode 100644 index 0000000..9ec174f --- /dev/null +++ b/roles/cmoa_os_setting/files/ingress-nginx/values.yaml @@ -0,0 +1,944 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +controller: + name: controller + image: + ## Keep false as default for now! + chroot: false + registry: registry.k8s.io + image: ingress-nginx/controller + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v1.3.1" + digest: sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974 + digestChroot: sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1 + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + # -- Configures the controller container name + containerName: controller + + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # -- Annotations to be added to the controller config configuration configmap. 
+ configAnnotations: {} + + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + + # -- Optionally customize the pod hostname. + hostname: {} + + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + + # -- Process IngressClass per name (additionally as per spec.controller). + ingressClassByName: false + + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. 
+ # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: true + + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not + enabled: false + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + + # -- Election ID to use for status update + electionID: ingress-controller-leader + + ## This section refers to the creation of the IngressClass resource + ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 + ingressClassResource: + # -- Name of the ingressClass + name: nginx + # -- Is this ingressClass enabled or not + enabled: true + # -- Is this the default ingressClass for the cluster + default: false + # -- Controller-value of the controller that is processing this ingressClass + controllerValue: "k8s.io/ingress-nginx" + + # -- Parameters is a link to a custom resource containing additional + # configuration for the controller. This is optional if the controller + # does not require extra parameters. + parameters: {} + + # -- For backwards compatibility with ingress.class annotation, use ingressClass. 
+ # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Security Context policies for controller pods + podSecurityContext: {} + + # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + + # -- Allows customization of the source of the IP address or FQDN to report + # in the ingress status field. By default, it reads the information provided + # by the service. If disable, the status field reports the IP address of the + # node or nodes where an ingress controller pod is running. + publishService: + # -- Enable 'publishService' or not + enabled: true + # -- Allows overriding of the publish service to bind to + # Must be / + pathOverride: "" + + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: false + # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) + namespace: "" + # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels + # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. + namespaceSelector: "" + + # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + + tcp: + # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the tcp config configmap + annotations: {} + + udp: + # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the udp config configmap + annotations: {} + + # -- Maxmind license key to download GeoLite2 Databases. 
+ ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # -- Additional command line arguments to pass to nginx-ingress-controller + # E.g. to specify the default SSL certificate you can use + extraArgs: {} + ## extraArgs: + ## default-ssl-certificate: "/" + + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + # -- Use a `DaemonSet` or `Deployment` + kind: Deployment + + # -- Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: 
app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + 
periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the ingress nginx controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. 
+ ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + cooldownPeriod: 300 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + # -- Enable mimalloc as a drop-in replacement for malloc. 
+ ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + # -- If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were + # using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # It allows choosing the protocol for each backend specified in the Kubernetes service. + # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 + # Will be ignored for Kubernetes versions older than 1.20 + ## + appProtocol: true + + annotations: {} + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it. + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + ## the service controller allocates a port from your cluster’s NodePort range. 
+ ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack-ness requested or required by this Service. Possible values are + # SingleStack, PreferDualStack or RequireDualStack. + # The ipFamilies and clusterIPs fields depend on the value of this field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilyPolicy: "SingleStack" + + # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically + # based on cluster configuration and the ipFamilyPolicy field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilies: + - IPv4 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + ## type: NodePort + ## nodePorts: + ## http: 32080 + ## https: 32443 + ## tcp: + ## 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + external: + enabled: true + + internal: + # -- Enables an additional internal load balancer (besides the external one). + enabled: false + # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + annotations: {} + + # loadBalancerIP: "" + + # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. + loadBalancerSourceRanges: [] + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + + # -- Additional containers to be added to the controller pod. 
+ # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. + extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + extraModules: [] + ## Modules, which are mounted into the core nginx image + # - name: opentelemetry + # image: registry.k8s.io/ingress-nginx/opentelemetry:v20220801-g00ee51f09@sha256:482562feba02ad178411efc284f8eb803a185e3ea5588b6111ccbc20b816b427 + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + admissionWebhooks: + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. 
+ enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + + # -- Use an existing PSP instead of creating one + existingPsp: "" + networkPolicyEnabled: false + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + createSecretJob: + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + patchWebhookJob: + resources: {} + + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v1.3.0 + digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + securityContext: + runAsNonRoot: true + runAsUser: 2000 + fsGroup: 2000 + + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available 
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + ## The label to use to retrieve the job name from. + ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less then a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # 
annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + + # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + # to 300, allowing the draining of connections up to five minutes. + # If the active connections end before that, the pod will terminate gracefully at that time. + # To effectively take advantage of this feature, the Configmap feature + # worker-shutdown-timeout new value is 240s instead of 10s. + ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +# -- Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + ## + enabled: false + + name: defaultbackend + image: + registry: registry.k8s.io + image: defaultbackend-amd64 + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + extraArgs: {} + + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 
5 + successThreshold: 1 + timeoutSeconds: 5 + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + # -- Security Context policies for controller pods + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + podSecurityContext: {} + + # -- Security Context policies for controller main container. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + containerSecurityContext: {} + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. 
+ # - name: copy-portal-skins + # emptyDir: {} + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + service: + annotations: {} + + # clusterIP: "" + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} + +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false + +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} + +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# 53: "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP ports names in ingress controller service +## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" + +# -- (string) A 
base64-encoded Diffie-Hellman parameter. +# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: diff --git a/roles/cmoa_os_setting/handlers/main.yml b/roles/cmoa_os_setting/handlers/main.yml new file mode 100644 index 0000000..4bf601f --- /dev/null +++ b/roles/cmoa_os_setting/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Reload systemd configuration + systemd: + daemon_reload: True + +- name: Restart containerd service + service: + name: containerd + enabled: true + state: restarted diff --git a/roles/cmoa_os_setting/meta/main.yml b/roles/cmoa_os_setting/meta/main.yml new file mode 100644 index 0000000..c572acc --- /dev/null +++ b/roles/cmoa_os_setting/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/roles/cmoa_os_setting/tasks/00-centos-os-main.yml b/roles/cmoa_os_setting/tasks/00-centos-os-main.yml new file mode 100644 index 0000000..0f35148 --- /dev/null +++ b/roles/cmoa_os_setting/tasks/00-centos-os-main.yml @@ -0,0 +1,73 @@ +--- +- name: Update and upgrade yum packages + yum: + name: "*" + state: latest + +- name: Install yum packages + yum: + name: ['cloud-utils', 'ca-certificates', 'socat', 'conntrack', 'gnupg', 'bash-completion'] + state: present + +- name: Disable firewalld + systemd: name=firewalld state=stopped + ignore_errors: yes + tags: + - install + - atomic + - firewalld + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \1' + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/modules-load.d/k8s2.conf + line: "{{ item }}" + create: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Add br_netfilter to module autoload + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- 
name: Persist Kubernetes sysctl settings + lineinfile: + path: /etc/sysctl.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Enable net.bridge.bridge-nf-call sysctls + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 'net.bridge.bridge-nf-call-ip6tables' + +- name: Enable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Setting hosts file + template: + src: hosts.j2 + dest: /etc/hosts diff --git a/roles/cmoa_os_setting/tasks/00-ubuntu-os-main.yml b/roles/cmoa_os_setting/tasks/00-ubuntu-os-main.yml new file mode 100644 index 0000000..8c460d5 --- /dev/null +++ b/roles/cmoa_os_setting/tasks/00-ubuntu-os-main.yml @@ -0,0 +1,71 @@ +--- +- name: Update and upgrade apt packages + apt: + upgrade: yes + update_cache: yes + force_apt_get: yes + cache_valid_time: 86400 + +- name: Install apt packages + apt: + name: ['cloud-utils', 'apt-transport-https', 'ca-certificates', 'curl', 'socat', 'conntrack', 'gnupg', 'lsb-release', 'bash-completion', 'chrony'] + state: present + +- name: Disable ufw + command: 'ufw disable' + when: ansible_distribution_version == '20.04' + +- name: Disable SWAP since kubernetes can't work with swap enabled (1/2) + command: 'swapoff -a' + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled (2/2) + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \1' + +- name: Add br_netfilter to module autoload + lineinfile: + path: /etc/modules-load.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Add br_netfilter to module autoload + modprobe: + name: "{{ item }}" + state: present + become: true + with_items: + - 'overlay' + - 'br_netfilter' + +- name: Persist Kubernetes sysctl settings + lineinfile: + path: 
/etc/sysctl.d/k8s.conf + line: "{{ item }}" + create: true + with_items: + - 'net.bridge.bridge-nf-call-iptables = 1' + - 'net.bridge.bridge-nf-call-ip6tables = 1' + - 'net.ipv4.ip_forward = 1' + +- name: Enable net.bridge.bridge-nf-call sysctls + sysctl: + name: "{{ item }}" + value: 1 + with_items: + - 'net.bridge.bridge-nf-call-iptables' + - 'net.bridge.bridge-nf-call-ip6tables' + +- name: Enable net.ipv4.ip_forward + sysctl: + name: net.ipv4.ip_forward + value: "1" + +- name: Setting hosts file + template: + src: hosts.j2 + dest: /etc/hosts diff --git a/roles/cmoa_os_setting/tasks/01-centos-os-runtime.yml b/roles/cmoa_os_setting/tasks/01-centos-os-runtime.yml new file mode 100644 index 0000000..35a0cb6 --- /dev/null +++ b/roles/cmoa_os_setting/tasks/01-centos-os-runtime.yml @@ -0,0 +1,45 @@ +--- +- name: Add containerd yum repository + command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + yum: + name: ['containerd'] + state: present + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + +- name: Add kubernetes yum repository + ansible.builtin.yum_repository: + name: kubernetes + description: kubernetes + baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 + enabled: 1 + gpgcheck: 1 + gpgkey: https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + +- name: Install kubernetes utils + ansible.builtin.yum: + name: ['kubelet-{{kubernetes_version}}','kubeadm-{{kubernetes_version}}','kubectl-{{kubernetes_version}}'] + exclude: kubernetes \ No newline at end of file diff 
--git a/roles/cmoa_os_setting/tasks/01-ubuntu-os-runtime.yml b/roles/cmoa_os_setting/tasks/01-ubuntu-os-runtime.yml new file mode 100644 index 0000000..556485e --- /dev/null +++ b/roles/cmoa_os_setting/tasks/01-ubuntu-os-runtime.yml @@ -0,0 +1,78 @@ +--- +- name: Add docker apt key + apt_key: + url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg + +- name: Add docker apt repository + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable + filename: docker + register: containerd_apt_repo_task + +- name: apt list --upgradable + command: apt list --upgradable + when: containerd_apt_repo_task.changed + +- name: apt update + apt: + update_cache: yes + when: containerd_apt_repo_task.changed + +- name: Create containerd configuration directory + file: + path: /etc/containerd + state: directory + +- name: Configure containerd + template: + src: config.toml.j2 + dest: /etc/containerd/config.toml + notify: + - Restart containerd service + +- name: Install required packages + apt: + name: + - containerd.io + notify: + - Reload systemd configuration + - Restart containerd service + +- meta: flush_handlers + +- name: Enable containerd service + service: + name: containerd + enabled: True + state: started + +- name: Install kubernetes + block: + - name: 'Add kubernetes repo key' + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + become: true + - name: Add kubernetes repository + apt_repository: + repo: deb http://apt.kubernetes.io kubernetes-xenial main + state: present + filename: 'kubernetes' + become: true + - name: Install kubernetes components + apt: + name: ['kubelet={{kubernetes_version}}-*', 'kubeadm={{kubernetes_version}}-*', 'kubectl={{kubernetes_version}}-*'] + state: present + update_cache: yes + force: yes + dpkg_options: force-downgrade + +- name: Hold kubernetes packages + dpkg_selections: + name: 
"{{item}}" + selection: hold + with_items: + - kubelet + - kubectl + - kubeadm + diff --git a/roles/cmoa_os_setting/tasks/02-k8s-main.yml b/roles/cmoa_os_setting/tasks/02-k8s-main.yml new file mode 100644 index 0000000..ede7119 --- /dev/null +++ b/roles/cmoa_os_setting/tasks/02-k8s-main.yml @@ -0,0 +1,45 @@ +--- +- name: Enable kubelet service + systemd: + name: kubelet + enabled: true + masked: false + +- name: Check if Kubernetes has already been initialized. + stat: + path: /etc/kubernetes/admin.conf + register: kubernetes_init_stat + +# Set up master. +- include_tasks: 03-k8s-master.yml + when: kubernetes_role == 'master' + +# Set up nodes. +- name: Get the kubeadm join command from the Kubernetes master. + command: kubeadm token create --print-join-command + changed_when: false + when: kubernetes_role == 'master' + register: kubernetes_join_command_result + +- name: Get kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/ansible_config + flat: yes + when: kubernetes_role == 'master' + +- name: Set the kubeadm join command globally. + set_fact: + kubernetes_join_command: > + {{ kubernetes_join_command_result.stdout }} + {{ kubernetes_join_command_extra_opts }} + when: kubernetes_join_command_result.stdout is defined + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] }}" + +- include_tasks: 05-k8s-node.yml + when: kubernetes_role == 'node' + +- include_tasks: 06-worker-directory.yml + when: kubernetes_role == 'node' diff --git a/roles/cmoa_os_setting/tasks/03-k8s-master.yml b/roles/cmoa_os_setting/tasks/03-k8s-master.yml new file mode 100644 index 0000000..954cdbb --- /dev/null +++ b/roles/cmoa_os_setting/tasks/03-k8s-master.yml @@ -0,0 +1,45 @@ +--- +- name: Initialize Kubernetes master with kubeadm init. 
+ command: > + kubeadm init + --pod-network-cidr={{ kubernetes_pod_network.cidr }} + --apiserver-advertise-address={{ kubernetes_apiserver_advertise_address | default(ansible_default_ipv4.address, true) }} + {{ kubernetes_kubeadm_init_extra_opts }} + register: kubeadmin_init + when: not kubernetes_init_stat.stat.exists + +- name: Print the init output to screen. + debug: + var: kubeadmin_init.stdout + verbosity: 2 + when: not kubernetes_init_stat.stat.exists + +- name: Ensure .kube directory exists. + file: + path: ~/.kube + state: directory + +- name: Symlink the kubectl admin.conf to ~/.kube/conf. + file: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/config + state: link + force: yes + +- name: copy the kubectl config to ~/.kube/ansible_config + copy: + src: /etc/kubernetes/admin.conf + dest: ~/.kube/ansible_config + remote_src: true + +- name: Configure Calico networking and Metric Server + include_tasks: 04-k8s-master-yaml.yml + +- name: Kubectl Cheat Sheet + lineinfile: + path: ~/.bashrc + line: "{{ item }}" + with_items: + - source <(kubectl completion bash) + - alias k=kubectl + - complete -o default -F __start_kubectl k diff --git a/roles/cmoa_os_setting/tasks/04-k8s-master-yaml.yml b/roles/cmoa_os_setting/tasks/04-k8s-master-yaml.yml new file mode 100644 index 0000000..996a122 --- /dev/null +++ b/roles/cmoa_os_setting/tasks/04-k8s-master-yaml.yml @@ -0,0 +1,15 @@ +--- +- name: Configure Calico networking. 
+ command: "{{ item }}" + with_items: + - kubectl apply -f {{ kubernetes_calico_manifest_file }} + register: calico_result + changed_when: "'created' in calico_result.stdout" + when: kubernetes_pod_network.cni == 'calico' + +- name: Configure Metric Server + command: "{{ item }}" + with_items: + - kubectl apply -f {{ kubernetes_metric_server_file }} + register: metric_server_result + changed_when: "'created' in metric_server_result.stdout" diff --git a/roles/cmoa_os_setting/tasks/05-k8s-node.yml b/roles/cmoa_os_setting/tasks/05-k8s-node.yml new file mode 100644 index 0000000..304cbf1 --- /dev/null +++ b/roles/cmoa_os_setting/tasks/05-k8s-node.yml @@ -0,0 +1,6 @@ +--- +- name: Join node to Kubernetes master + shell: > + {{ kubernetes_join_command }} + creates=/etc/kubernetes/kubelet.conf + tags: ['skip_ansible_lint'] diff --git a/roles/cmoa_os_setting/tasks/06-worker-directory.yml b/roles/cmoa_os_setting/tasks/06-worker-directory.yml new file mode 100644 index 0000000..5b14eab --- /dev/null +++ b/roles/cmoa_os_setting/tasks/06-worker-directory.yml @@ -0,0 +1,43 @@ +--- +- name: make worker1 directory + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: u+rwx,g+rwx,o+rwx + recurse: yes + owner: root + group: root + with_items: + - /media/data/minio/pv1 + - /media/data/minio/pv2 + - /media/data/postgres/postgres-data-0 + - /media/data/elasticsearch/elasticsearch-data-0 + - /media/data/zookeeper/zookeeper-data-0 + - /media/data/kafka/kafka-data-0 + - /media/cloudmoa/ingester/ingester-data-1 + - /media/data/redis/redis-data-0 + - /media/data/redis/redis-data-1 + - /media/data/rabbitmq + when: inventory_hostname in groups["worker1"] + +- name: make worker2 directory + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: u+rwx,g+rwx,o+rwx + recurse: yes + owner: root + group: root + with_items: + - /media/data/minio/pv3 + - /media/data/minio/pv4 + - /media/data/elasticsearch/elasticsearch-data-1 + - 
/media/data/zookeeper/zookeeper-data-1 + - /media/data/zookeeper/zookeeper-data-2 + - /media/data/kafka/kafka-data-1 + - /media/data/kafka/kafka-data-2 + - /media/cloudmoa/ingester/ingester-data-2 + - /media/cloudmoa/ingester/ingester-data-3 + - /media/data/redis/redis-data-1 + - /media/data/redis/redis-data-2 + when: inventory_hostname in groups["worker2"] diff --git a/roles/cmoa_os_setting/tasks/main.yml b/roles/cmoa_os_setting/tasks/main.yml new file mode 100644 index 0000000..d73559e --- /dev/null +++ b/roles/cmoa_os_setting/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- include: 00-centos-os-main.yml + tags: centos + when: ansible_distribution == 'CentOS' + +- include: 00-ubuntu-os-main.yml + tags: ubuntu + when: ansible_distribution == 'Ubuntu' + +- include: 01-centos-os-runtime.yml + tags: centos + when: ansible_distribution == 'CentOS' + +- include: 01-ubuntu-os-runtime.yml + tags: ubuntu + when: ansible_distribution == 'Ubuntu' + +- include: 02-k8s-main.yml + tags: k8s-main diff --git a/roles/cmoa_os_setting/templates/config.toml.j2 b/roles/cmoa_os_setting/templates/config.toml.j2 new file mode 100644 index 0000000..0217565 --- /dev/null +++ b/roles/cmoa_os_setting/templates/config.toml.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +{% from 'yaml2toml_macro.j2' import yaml2toml with context -%} + +{{ yaml2toml(containerd_config) }} diff --git a/roles/cmoa_os_setting/templates/hosts.j2 b/roles/cmoa_os_setting/templates/hosts.j2 new file mode 100644 index 0000000..18804b7 --- /dev/null +++ b/roles/cmoa_os_setting/templates/hosts.j2 @@ -0,0 +1,6 @@ +127.0.0.1 localhost +::1 localhost + +{% for host in groups.all %} +{{ hostvars[host].ansible_default_ipv4.address }} {{ hostvars[host].ansible_fqdn }} {{ hostvars[host].ansible_hostname }} +{%endfor%} diff --git a/roles/cmoa_os_setting/templates/yaml2toml_macro.j2 b/roles/cmoa_os_setting/templates/yaml2toml_macro.j2 new file mode 100644 index 0000000..33f69d0 --- /dev/null +++ 
b/roles/cmoa_os_setting/templates/yaml2toml_macro.j2 @@ -0,0 +1,58 @@ +{%- macro yaml2inline_toml(item, depth) -%} + {%- if item is string or item is number -%} + {#- First, process all primitive types. -#} + {{ item | to_json }} + {%- elif item is mapping -%} + {#- Second, process all mappings. -#} + {#- Note that inline mappings must not contain newlines (except inside contained lists). -#} + {{ "{" }} + {%- for key, value in item.items() | sort -%} + {{ " " + + (key | to_json) + + " = " + + yaml2inline_toml(value, depth) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ " }" }} + {%- else -%} + {#- Third, process all lists. -#} + {%- if item | length == 0 -%}{{ "[]" }}{%- else -%} + {{ "[" }} + {%- for entry in item -%} + {{ "\n" + + (" " * (depth + 1)) + + yaml2inline_toml(entry, depth + 1) + }} + {%- if not loop.last -%}{{ "," }}{%- endif -%} + {%- endfor -%} + {{ "\n" + (" " * depth) + "]" }} + {%- endif -%} + {%- endif -%} +{%- endmacro -%} + +{%- macro yaml2toml(item, super_keys=[]) -%} + {%- for key, value in item.items() | sort -%} + {%- if value is not mapping -%} + {#- First, process all non-mappings. -#} + {{ (" " * (super_keys | length)) + + (key | to_json) + + " = " + + (yaml2inline_toml(value, super_keys | length)) + + "\n" + }} + {%- endif -%} + {%- endfor -%} + {%- for key, value in item.items() | sort -%} + {%- if value is mapping -%} + {#- Second, process all mappings. 
-#} + {{ "\n" + + (" " * (super_keys | length)) + + "[" + + ((super_keys+[key]) | map('to_json') | join(".")) + + "]\n" + + yaml2toml(value, super_keys+[key]) + }} + {%- endif -%} + {%- endfor -%} +{%- endmacro -%} diff --git a/roles/cmoa_os_setting/tests/inventory b/roles/cmoa_os_setting/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/roles/cmoa_os_setting/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/roles/cmoa_os_setting/tests/test.yml b/roles/cmoa_os_setting/tests/test.yml new file mode 100644 index 0000000..191e731 --- /dev/null +++ b/roles/cmoa_os_setting/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - cmoa_os_setting diff --git a/roles/cmoa_os_setting/vars/main.yml b/roles/cmoa_os_setting/vars/main.yml new file mode 100644 index 0000000..2aa5032 --- /dev/null +++ b/roles/cmoa_os_setting/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for cmoa_os_setting diff --git a/ssh_key/authorized_keys.yml b/ssh_key/authorized_keys.yml new file mode 100644 index 0000000..d01e291 --- /dev/null +++ b/ssh_key/authorized_keys.yml @@ -0,0 +1,11 @@ +--- +- hosts: cluster + remote_user: root + tasks: + - name: key add + authorized_key: + user: root + state: present + key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}" + manage_dir: False + diff --git a/ssh_key/key_test.sh b/ssh_key/key_test.sh new file mode 100755 index 0000000..93088f1 --- /dev/null +++ b/ssh_key/key_test.sh @@ -0,0 +1,5 @@ +#!/usr/bin/expect -f +spawn ssh-copy-id root@[lindex $argv 0] +expect "password:" +send "password\r" +expect eof